/*
 * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
 */

/* Now we have all buffers that must be used in balancing of the tree.
 * Further calculations can not cause schedule(), and thus the buffer
 * tree will be stable until the balancing is finished.
 * Balance the tree according to the analysis made before,
 * using the buffers obtained after all of the above.
 */

/*
 ** balance_leaf_when_delete
 */
#include <linux/config.h>
#include <asm/uaccess.h>
#include <linux/time.h>
#include <linux/reiserfs_fs.h>
#include <linux/buffer_head.h>
#ifdef CONFIG_REISERFS_CHECK

struct tree_balance *cur_tb = NULL;	/* detects whether more than one
					   copy of tb exists as a means
					   of checking whether schedule
					   is interrupting do_balance */
#endif
inline void do_balance_mark_leaf_dirty(struct tree_balance *tb,
				       struct buffer_head *bh, int flag)
{
	journal_mark_dirty(tb->transaction_handle,
			   tb->transaction_handle->t_super, bh);
}

#define do_balance_mark_internal_dirty do_balance_mark_leaf_dirty
#define do_balance_mark_sb_dirty do_balance_mark_leaf_dirty
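/* Editor's note: the two #defines above are deliberate aliases - every
   buffer touched by balancing (leaf, internal or super block) is logged
   through journal_mark_dirty() rather than being marked dirty directly. */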
/* summary:
   if deleting something ( tb->insert_size[0] < 0 )
	return balance_leaf_when_delete();     (flag d handled here)
   else
	if lnum is larger than 0 we put items into the left node
	if rnum is larger than 0 we put items into the right node
	if snum1 is larger than 0 we put items into the new node s1
	if snum2 is larger than 0 we put items into the new node s2
   Note that all *num* count new items being created.

   It would be easier to read balance_leaf() if each of these summary
   lines was a separate procedure rather than being inlined.  I think
   that there are many passages here and in balance_leaf_when_delete() in
   which two calls to one procedure can replace two passages, and it
   might save cache space and improve software maintenance costs to do so.

   Vladimir made the perceptive comment that we should offload most of
   the decision making in this function into fix_nodes/check_balance, and
   then create some sort of structure in tb that says what actions should
   be performed by do_balance.
*/
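/* Editor's sketch (not in the original source): the summary above as
   skeleton code.  The field names are the real ones from struct
   tree_balance; the bodies are elided. */
#if 0
	if (tb->insert_size[0] < 0)
		return balance_leaf_when_delete(tb, flag);
	if (tb->lnum[0] > 0)
		;	/* shift lnum[0] items (lbytes of the boundary item) to L[0] */
	if (tb->rnum[0] > 0)
		;	/* shift rnum[0] items (rbytes of the boundary item) to R[0] */
	for (i = tb->blknum[0] - 2; i >= 0; i--)
		;	/* fill S_new[i] with snum[i] items (sbytes[i] of a split item) */
#endif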
/* Balance leaf node in case of delete or cut: insert_size[0] < 0
 *
 * lnum, rnum can have values >= -1
 *	-1 means that the neighbor must be joined with S
 *	 0 means that nothing should be done with the neighbor
 *	>0 means to shift entirely or partly the specified number of items to the neighbor
 */
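/* Editor's note: as an example, lnum[0] == -1 && rnum[0] == -1 joins
   L[0], S[0] and R[0] into one node; lnum[0] == -1 && rnum[0] == 0 empties
   S[0] into the left neighbor alone; positive lnum[0]/rnum[0] split the
   remaining items of S[0] between both neighbors.  Every such path below
   ends by invalidating the emptied buffer(s). */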
static int balance_leaf_when_delete(struct tree_balance *tb, int flag)
{
	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
	int item_pos = PATH_LAST_POSITION(tb->tb_path);
	int pos_in_item = tb->tb_path->pos_in_item;
	struct buffer_info bi;
	int n;
	struct item_head *ih;
	RFALSE(tb->FR[0] && B_LEVEL(tb->FR[0]) != DISK_LEAF_NODE_LEVEL + 1,
	       "vs- 12000: level: wrong FR %z", tb->FR[0]);
	RFALSE(tb->blknum[0] > 1,
	       "PAP-12005: tb->blknum == %d, can not be > 1", tb->blknum[0]);
	RFALSE(!tb->blknum[0] && !PATH_H_PPARENT(tb->tb_path, 0),
	       "PAP-12010: tree can not be empty");
	ih = B_N_PITEM_HEAD(tbS0, item_pos);

	/* Delete or truncate the item */

	switch (flag) {
	case M_DELETE:		/* delete item in S[0] */

		RFALSE(ih_item_len(ih) + IH_SIZE != -tb->insert_size[0],
		       "vs-12013: mode Delete, insert size %d, ih to be deleted %h",
		       -tb->insert_size[0], ih);
		bi.tb = tb;
		bi.bi_bh = tbS0;
		bi.bi_parent = PATH_H_PPARENT(tb->tb_path, 0);
		bi.bi_position = PATH_H_POSITION(tb->tb_path, 1);
		leaf_delete_items(&bi, 0, item_pos, 1, -1);
		if (!item_pos && tb->CFL[0]) {
			if (B_NR_ITEMS(tbS0)) {
				replace_key(tb, tb->CFL[0], tb->lkey[0],
					    tbS0, 0);
			} else {
				if (!PATH_H_POSITION(tb->tb_path, 1))
					replace_key(tb, tb->CFL[0], tb->lkey[0],
						    PATH_H_PPARENT(tb->tb_path,
								   0), 0);
			}
		}

		RFALSE(!item_pos && !tb->CFL[0],
		       "PAP-12020: tb->CFL[0]==%p, tb->L[0]==%p", tb->CFL[0],
		       tb->L[0]);

		break;
	case M_CUT:{		/* cut item in S[0] */
			bi.tb = tb;
			bi.bi_bh = tbS0;
			bi.bi_parent = PATH_H_PPARENT(tb->tb_path, 0);
			bi.bi_position = PATH_H_POSITION(tb->tb_path, 1);
			if (is_direntry_le_ih(ih)) {

				/* UFS unlink semantics are such that you can
				   only delete one directory entry at a time. */
				/* when we cut a directory tb->insert_size[0]
				   means number of entries to be cut (always 1) */
				tb->insert_size[0] = -1;
				leaf_cut_from_buffer(&bi, item_pos, pos_in_item,
						     -tb->insert_size[0]);

				RFALSE(!item_pos && !pos_in_item && !tb->CFL[0],
				       "PAP-12030: can not change delimiting key. CFL[0]=%p",
				       tb->CFL[0]);

				if (!item_pos && !pos_in_item && tb->CFL[0]) {
					replace_key(tb, tb->CFL[0], tb->lkey[0],
						    tbS0, 0);
				}
			} else {
				leaf_cut_from_buffer(&bi, item_pos, pos_in_item,
						     -tb->insert_size[0]);

				RFALSE(!ih_item_len(ih),
				       "PAP-12035: cut must leave non-zero dynamic length of item");
			}
			break;
		}
	default:
		print_cur_tb("12040");
		reiserfs_panic(tb->tb_sb,
			       "PAP-12040: balance_leaf_when_delete: unexpectable mode: %s(%d)",
			       (flag == M_PASTE) ? "PASTE" : ((flag ==
							       M_INSERT) ? "INSERT" :
							      "UNKNOWN"), flag);
	}
	/* the rule is that no shifting occurs unless by shifting a node can be freed */
	n = B_NR_ITEMS(tbS0);
	if (tb->lnum[0]) {	/* L[0] takes part in balancing */
		if (tb->lnum[0] == -1) {	/* L[0] must be joined with S[0] */
			if (tb->rnum[0] == -1) {	/* R[0] must be also joined with S[0] */
				if (tb->FR[0] == PATH_H_PPARENT(tb->tb_path, 0)) {
					/* all contents of all the 3 buffers will be in L[0] */
					if (PATH_H_POSITION(tb->tb_path, 1) == 0
					    && 1 < B_NR_ITEMS(tb->FR[0]))
						replace_key(tb, tb->CFL[0],
							    tb->lkey[0],
							    tb->FR[0], 1);

					leaf_move_items(LEAF_FROM_S_TO_L, tb, n,
							-1, NULL);
					leaf_move_items(LEAF_FROM_R_TO_L, tb,
							B_NR_ITEMS(tb->R[0]),
							-1, NULL);

					reiserfs_invalidate_buffer(tb, tbS0);
					reiserfs_invalidate_buffer(tb,
								   tb->R[0]);

					return 0;
				}
				/* all contents of all the 3 buffers will be in R[0] */
				leaf_move_items(LEAF_FROM_S_TO_R, tb, n, -1,
						NULL);
				leaf_move_items(LEAF_FROM_L_TO_R, tb,
						B_NR_ITEMS(tb->L[0]), -1, NULL);

				/* right_delimiting_key is correct in R[0] */
				replace_key(tb, tb->CFR[0], tb->rkey[0],
					    tb->R[0], 0);

				reiserfs_invalidate_buffer(tb, tbS0);
				reiserfs_invalidate_buffer(tb, tb->L[0]);

				return -1;
			}

			RFALSE(tb->rnum[0] != 0,
			       "PAP-12045: rnum must be 0 (%d)", tb->rnum[0]);
			/* all contents of L[0] and S[0] will be in L[0] */
			leaf_shift_left(tb, n, -1);

			reiserfs_invalidate_buffer(tb, tbS0);

			return 0;
		}

		/* a part of contents of S[0] will be in L[0] and
		   the rest part of S[0] will be in R[0] */

		RFALSE((tb->lnum[0] + tb->rnum[0] < n) ||
		       (tb->lnum[0] + tb->rnum[0] > n + 1),
		       "PAP-12050: rnum(%d) and lnum(%d) and item number(%d) in S[0] are not consistent",
		       tb->rnum[0], tb->lnum[0], n);
		RFALSE((tb->lnum[0] + tb->rnum[0] == n) &&
		       (tb->lbytes != -1 || tb->rbytes != -1),
		       "PAP-12055: bad rbytes (%d)/lbytes (%d) parameters when items are not split",
		       tb->rbytes, tb->lbytes);
		RFALSE((tb->lnum[0] + tb->rnum[0] == n + 1) &&
		       (tb->lbytes < 1 || tb->rbytes != -1),
		       "PAP-12060: bad rbytes (%d)/lbytes (%d) parameters when items are split",
		       tb->rbytes, tb->lbytes);

		leaf_shift_left(tb, tb->lnum[0], tb->lbytes);
		leaf_shift_right(tb, tb->rnum[0], tb->rbytes);

		reiserfs_invalidate_buffer(tb, tbS0);

		return 0;
	}

	if (tb->rnum[0] == -1) {
		/* all contents of R[0] and S[0] will be in R[0] */
		leaf_shift_right(tb, n, -1);
		reiserfs_invalidate_buffer(tb, tbS0);
		return 0;
	}

	RFALSE(tb->rnum[0],
	       "PAP-12065: bad rnum parameter must be 0 (%d)", tb->rnum[0]);
	return 0;
}
static int balance_leaf(struct tree_balance *tb, struct item_head *ih,	/* item header of inserted item (this is on little endian) */
			const char *body,	/* body of inserted item or bytes to paste */
			int flag,	/* i - insert, d - delete, c - cut, p - paste
					   (see comment to do_balance) */
			struct item_head *insert_key,	/* in our processing of one level we sometimes determine what
							   must be inserted into the next higher level.  This insertion
							   consists of a key or two keys and their corresponding
							   pointers */
			struct buffer_head **insert_ptr	/* inserted node-ptrs for the next level */
    )
{
	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
	int item_pos = PATH_LAST_POSITION(tb->tb_path);	/* index into the array of item headers in S[0]
							   of the affected item */
	struct buffer_info bi;
	struct buffer_head *S_new[2];	/* new nodes allocated to hold what could not fit into S */
	int snum[2];		/* number of items that will be placed
				   into S_new (includes partially shifted
				   items) */
	int sbytes[2];		/* if an item is partially shifted into S_new then
				   if it is a directory item
				   it is the number of entries from the item that are shifted into S_new
				   else
				   it is the number of bytes from the item that are shifted into S_new */
	int n, i;
	int ret_val;
	int pos_in_item;
	int zeros_num;

	PROC_INFO_INC(tb->tb_sb, balance_at[0]);
	/* Make balance in case insert_size[0] < 0 */
	if (tb->insert_size[0] < 0)
		return balance_leaf_when_delete(tb, flag);

	zeros_num = 0;
	if (flag == M_INSERT && body == 0)
		zeros_num = ih_item_len(ih);

	pos_in_item = tb->tb_path->pos_in_item;
	/* for indirect item pos_in_item is measured in unformatted node
	   pointers. Recalculate to bytes */
	if (flag != M_INSERT
	    && is_indirect_le_ih(B_N_PITEM_HEAD(tbS0, item_pos)))
		pos_in_item *= UNFM_P_SIZE;
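	/* Editor's note: body == 0 together with M_INSERT is the caller's
	   way of inserting an item that consists entirely of zeroes (as in
	   a hole); zeros_num then counts zero bytes still to be produced
	   rather than copied from body. */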
	if (tb->lnum[0] > 0) {
		/* Shift lnum[0] items from S[0] to the left neighbor L[0] */
		if (item_pos < tb->lnum[0]) {
			/* new item or its part falls into L[0], shift it too */
			n = B_NR_ITEMS(tb->L[0]);

			switch (flag) {
			case M_INSERT:	/* insert item into L[0] */

				if (item_pos == tb->lnum[0] - 1
				    && tb->lbytes != -1) {
					/* part of new item falls into L[0] */
					int new_item_len;
					int version;

					ret_val =
					    leaf_shift_left(tb, tb->lnum[0] - 1,
							    -1);

					/* Calculate item length to insert to S[0] */
					new_item_len =
					    ih_item_len(ih) - tb->lbytes;
					/* Calculate and check item length to insert to L[0] */
					put_ih_item_len(ih,
							ih_item_len(ih) -
							new_item_len);

					RFALSE(ih_item_len(ih) <= 0,
					       "PAP-12080: there is nothing to insert into L[0]: ih_item_len=%d",
					       ih_item_len(ih));

					/* Insert new item into L[0] */
					bi.tb = tb;
					bi.bi_bh = tb->L[0];
					bi.bi_parent = tb->FL[0];
					bi.bi_position =
					    get_left_neighbor_position(tb, 0);
					leaf_insert_into_buf(&bi,
							     n + item_pos - ret_val,
							     ih, body,
							     zeros_num > ih_item_len(ih) ?
							     ih_item_len(ih) : zeros_num);

					version = ih_version(ih);

					/* Calculate key component, item length and body to insert into S[0] */
					set_le_ih_k_offset(ih,
							   le_ih_k_offset(ih) +
							   (tb->lbytes <<
							    (is_indirect_le_ih(ih) ?
							     tb->tb_sb->s_blocksize_bits -
							     UNFM_P_SHIFT : 0)));

					put_ih_item_len(ih, new_item_len);
					if (tb->lbytes > zeros_num) {
						body +=
						    (tb->lbytes - zeros_num);
						zeros_num = 0;
					} else
						zeros_num -= tb->lbytes;

					RFALSE(ih_item_len(ih) <= 0,
					       "PAP-12085: there is nothing to insert into S[0]: ih_item_len=%d",
					       ih_item_len(ih));
				} else {
					/* new item in whole falls into L[0] */
					/* Shift lnum[0]-1 items to L[0] */
					ret_val =
					    leaf_shift_left(tb, tb->lnum[0] - 1,
							    tb->lbytes);
					/* Insert new item into L[0] */
					bi.tb = tb;
					bi.bi_bh = tb->L[0];
					bi.bi_parent = tb->FL[0];
					bi.bi_position =
					    get_left_neighbor_position(tb, 0);
					leaf_insert_into_buf(&bi,
							     n + item_pos - ret_val,
							     ih, body, zeros_num);

					tb->insert_size[0] = 0;
					zeros_num = 0;
				}
				break;
			case M_PASTE:	/* append item in L[0] */

				if (item_pos == tb->lnum[0] - 1
				    && tb->lbytes != -1) {
					/* we must shift the part of the appended item */
					if (is_direntry_le_ih
					    (B_N_PITEM_HEAD(tbS0, item_pos))) {

						RFALSE(zeros_num,
						       "PAP-12090: invalid parameter in case of a directory");

						if (tb->lbytes > pos_in_item) {
							/* new directory entry falls into L[0] */

							/* Shift lnum[0] - 1 items in whole. Shift lbytes - 1 entries from given directory item */
							/* ... */

							/* Append given directory entry to directory item */
							bi.bi_position =
							    get_left_neighbor_position
							    (tb, 0);
							/* ... */

							/* previous string prepared space for pasting new entry, following string pastes this entry */

							/* when we have merge directory item, pos_in_item has been changed too */

							/* paste new directory entry. 1 is entry number */
							leaf_paste_entries(bi.bi_bh,
									   /* ... */);
							tb->insert_size[0] = 0;
						} else {
							/* new directory item doesn't fall into L[0] */
							/* Shift lnum[0]-1 items in whole. Shift lbytes directory entries from directory item number lnum[0] */
							/* ... */

							/* Calculate new position to append in item body */
							pos_in_item -=
							    tb->lbytes;
						}
					} else {
						/* regular object */
						RFALSE(tb->lbytes <= 0,
						       "PAP-12095: there is nothing to shift to L[0]. lbytes=%d",
						       tb->lbytes);
						RFALSE(pos_in_item !=
						       ih_item_len(B_N_PITEM_HEAD
								   (tbS0, item_pos)),
						       "PAP-12100: incorrect position to paste: item_len=%d, pos_in_item=%d",
						       ih_item_len(B_N_PITEM_HEAD
								   (tbS0, item_pos)),
						       pos_in_item);

						if (tb->lbytes >= pos_in_item) {
							/* appended item will be in L[0] in whole */
							int l_n;

							/* this bytes number must be appended to the last item of L[h] */
							l_n =
							    tb->lbytes -
							    pos_in_item;

							/* Calculate new insert_size[0] */
							tb->insert_size[0] -=
							    l_n;

							RFALSE(tb->insert_size[0] <= 0,
							       "PAP-12105: there is nothing to paste into L[0]. insert_size=%d",
							       tb->insert_size[0]);
							/* ... */

							/* Append to body of item in L[0] */
							bi.bi_position =
							    get_left_neighbor_position
							    (tb, 0);
							/* ... */

							/* 0-th item in S0 can be only of DIRECT type when l_n != 0 */
							RFALSE( /* ... */,
							       "PAP-12106: item length must be 0");
							RFALSE( /* ... */,
							       "PAP-12107: items must be of the same file");
							if (is_indirect_le_ih
							    (B_N_PITEM_HEAD
							     (tb->L[0],
							      n + item_pos -
							      ret_val))) {
								/* ... */
							}

							/* update key of first item in S0 */
							/* ... */
							/* update left delimiting key */
							/* ... */

							/* Calculate new body, position in item and insert_size[0] */
							if (l_n > zeros_num) {
								/* ... */
							}

							RFALSE(!op_is_left_mergeable
							       ( /* ... */ ) &&
							       !op_is_left_mergeable
							       ( /* ... */ ),
							       "PAP-12120: item must be merge-able with left neighboring item");
						} else {	/* only part of the appended item will be in L[0] */

							/* Calculate position in item for append in S[0] */
							/* ... */

							RFALSE(pos_in_item <= 0,
							       "PAP-12125: no place for paste. pos_in_item=%d",
							       pos_in_item);

							/* Shift lnum[0] - 1 items in whole. Shift lbytes - 1 byte from item number lnum[0] */
							/* ... */
						}
					}
				} else {	/* appended item will be in L[0] in whole */

					struct item_head *pasted;

					if (!item_pos && op_is_left_mergeable(B_N_PKEY(tbS0, 0), tbS0->b_size)) {	/* if we paste into first item of S[0] and it is left mergable */
						/* then increment pos_in_item by the size of the last item in L[0] */
						pasted =
						    B_N_PITEM_HEAD(tb->L[0],
								   n - 1);
						if (is_direntry_le_ih(pasted))
							pos_in_item +=
							    ih_entry_count(pasted);
						else
							pos_in_item +=
							    ih_item_len(pasted);
					}

					/* Shift lnum[0] - 1 items in whole. Shift lbytes - 1 byte from item number lnum[0] */
					ret_val =
					    leaf_shift_left(tb, tb->lnum[0],
							    tb->lbytes);
					/* Append to body of item in L[0] */
					bi.tb = tb;
					bi.bi_bh = tb->L[0];
					bi.bi_parent = tb->FL[0];
					bi.bi_position =
					    get_left_neighbor_position(tb, 0);
					leaf_paste_in_buffer(&bi,
							     n + item_pos - ret_val,
							     pos_in_item,
							     tb->insert_size[0],
							     body, zeros_num);

					/* if appended item is directory, paste entry */
					pasted =
					    B_N_PITEM_HEAD(tb->L[0],
							   n + item_pos - ret_val);
					if (is_direntry_le_ih(pasted))
						leaf_paste_entries(bi.bi_bh,
								   n + item_pos - ret_val,
								   pos_in_item, 1,
								   (struct reiserfs_de_head *)body,
								   body + DEH_SIZE,
								   tb->insert_size[0]);
					/* if appended item is indirect item, put unformatted node into un list */
					if (is_indirect_le_ih(pasted))
						set_ih_free_space(pasted, 0);
					tb->insert_size[0] = 0;
					zeros_num = 0;
				}
				break;
			default:	/* cases d and t */
				reiserfs_panic(tb->tb_sb,
					       "PAP-12130: balance_leaf: lnum > 0: unexpectable mode: %s(%d)",
					       (flag ==
						M_DELETE) ? "DELETE" : ((flag ==
									 M_CUT) ? "CUT" : "UNKNOWN"),
					       flag);
			}
		} else {
			/* new item doesn't fall into L[0] */
			leaf_shift_left(tb, tb->lnum[0], tb->lbytes);
		}
	}
	/* tb->lnum[0] > 0 */
	/* Calculate new item position */
	item_pos -= (tb->lnum[0] - ((tb->lbytes != -1) ? 1 : 0));
	if (tb->rnum[0] > 0) {
		/* shift rnum[0] items from S[0] to the right neighbor R[0] */
		n = B_NR_ITEMS(tbS0);

		switch (flag) {

		case M_INSERT:	/* insert item */
			if (n - tb->rnum[0] < item_pos) {	/* new item or its part falls to R[0] */
				if (item_pos == n - tb->rnum[0] + 1 && tb->rbytes != -1) {	/* part of new item falls into R[0] */
					loff_t old_key_comp, old_len,
					    r_zeros_number;
					const char *r_body;
					int version;
					loff_t offset;

					leaf_shift_right(tb, tb->rnum[0] - 1,
							 -1);

					version = ih_version(ih);
					/* Remember key component and item length */
					old_key_comp = le_ih_k_offset(ih);
					old_len = ih_item_len(ih);

					/* Calculate key component and item length to insert into R[0] */
					offset = le_ih_k_offset(ih) +
					    ((old_len -
					      tb->rbytes) << (is_indirect_le_ih(ih)
							      ? tb->tb_sb->s_blocksize_bits -
							      UNFM_P_SHIFT : 0));
					set_le_ih_k_offset(ih, offset);
					put_ih_item_len(ih, tb->rbytes);
					/* Insert part of the item into R[0] */
					bi.tb = tb;
					bi.bi_bh = tb->R[0];
					bi.bi_parent = tb->FR[0];
					bi.bi_position =
					    get_right_neighbor_position(tb, 0);
					if ((old_len - tb->rbytes) > zeros_num) {
						r_zeros_number = 0;
						r_body =
						    body + (old_len -
							    tb->rbytes) -
						    zeros_num;
					} else {
						r_body = body;
						r_zeros_number =
						    zeros_num - (old_len -
								 tb->rbytes);
						zeros_num -= r_zeros_number;
					}

					leaf_insert_into_buf(&bi, 0, ih, r_body,
							     r_zeros_number);

					/* Replace right delimiting key by first key in R[0] */
					replace_key(tb, tb->CFR[0], tb->rkey[0],
						    tb->R[0], 0);

					/* Calculate key component and item length to insert into S[0] */
					set_le_ih_k_offset(ih, old_key_comp);
					put_ih_item_len(ih,
							old_len - tb->rbytes);

					tb->insert_size[0] -= tb->rbytes;

				} else {	/* whole new item falls into R[0] */

					/* Shift rnum[0]-1 items to R[0] */
					ret_val =
					    leaf_shift_right(tb,
							     tb->rnum[0] - 1,
							     tb->rbytes);
					/* Insert new item into R[0] */
					bi.tb = tb;
					bi.bi_bh = tb->R[0];
					bi.bi_parent = tb->FR[0];
					bi.bi_position =
					    get_right_neighbor_position(tb, 0);
					leaf_insert_into_buf(&bi,
							     item_pos - n +
							     tb->rnum[0] - 1,
							     ih, body,
							     zeros_num);

					if (item_pos - n + tb->rnum[0] - 1 == 0) {
						replace_key(tb, tb->CFR[0],
							    tb->rkey[0],
							    tb->R[0], 0);
					}
					zeros_num = tb->insert_size[0] = 0;
				}
			} else {	/* new item or part of it doesn't fall into R[0] */

				leaf_shift_right(tb, tb->rnum[0], tb->rbytes);
			}
			break;
		case M_PASTE:	/* append item */

			if (n - tb->rnum[0] <= item_pos) {	/* pasted item or part of it falls to R[0] */
				if (item_pos == n - tb->rnum[0] && tb->rbytes != -1) {	/* we must shift the part of the appended item */
					if (is_direntry_le_ih(B_N_PITEM_HEAD(tbS0, item_pos))) {	/* we append to directory item */
						int entry_count;

						RFALSE(zeros_num,
						       "PAP-12145: invalid parameter in case of a directory");
						entry_count =
						    I_ENTRY_COUNT(B_N_PITEM_HEAD
								  (tbS0,
								   item_pos));
						if (entry_count - tb->rbytes <
						    pos_in_item) {
							/* new directory entry falls into R[0] */
							int paste_entry_position;

							RFALSE(tb->rbytes - 1 >=
							       entry_count ||
							       !tb->insert_size[0],
							       "PAP-12150: no enough of entries to shift to R[0]: rbytes=%d, entry_count=%d",
							       tb->rbytes,
							       entry_count);

							/* Shift rnum[0]-1 items in whole. Shift rbytes-1 directory entries from directory item number rnum[0] */
							/* ... */

							/* Paste given directory entry to directory item */
							paste_entry_position =
							    pos_in_item -
							    entry_count +
							    tb->rbytes - 1;
							bi.tb = tb;
							bi.bi_bh = tb->R[0];
							bi.bi_parent = tb->FR[0];
							bi.bi_position =
							    get_right_neighbor_position
							    (tb, 0);
							leaf_paste_in_buffer
							    (&bi, 0,
							     paste_entry_position,
							     tb->insert_size[0],
							     body, zeros_num);
							/* paste entry */
							leaf_paste_entries(bi.bi_bh,
									   0,
									   paste_entry_position,
									   1,
									   (struct reiserfs_de_head *)body,
									   body + DEH_SIZE,
									   tb->insert_size[0]);

							if (paste_entry_position
							    == 0) {
								/* change delimiting keys */
								replace_key(tb,
									    tb->CFR[0],
									    tb->rkey[0],
									    tb->R[0],
									    0);
							}

							tb->insert_size[0] = 0;
						} else {	/* new directory entry doesn't fall into R[0] */

							leaf_shift_right(tb,
									 tb->rnum[0],
									 tb->rbytes);
						}
					} else {	/* regular object */

						int n_shift, n_rem,
						    r_zeros_number;
						const char *r_body;

						/* Calculate number of bytes which must be shifted from appended item */
						if ((n_shift =
						     tb->rbytes -
						     tb->insert_size[0]) < 0)
							n_shift = 0;

						RFALSE(pos_in_item !=
						       ih_item_len(B_N_PITEM_HEAD
								   (tbS0, item_pos)),
						       "PAP-12155: invalid position to paste. ih_item_len=%d, pos_in_item=%d",
						       pos_in_item,
						       ih_item_len(B_N_PITEM_HEAD
								   (tbS0, item_pos)));

						leaf_shift_right(tb,
								 tb->rnum[0],
								 n_shift);
						/* Calculate number of bytes which must remain in body after appending to R[0] */
						if ((n_rem =
						     tb->insert_size[0] -
						     tb->rbytes) < 0)
							n_rem = 0;

						{
							int version;
							unsigned long temp_rem =
							    n_rem;

							version =
							    ih_version(B_N_PITEM_HEAD
								       (tb->R[0], 0));
							if (is_indirect_le_key
							    (version,
							     B_N_PKEY(tb->R[0], 0))) {
								temp_rem =
								    n_rem <<
								    (tb->tb_sb->s_blocksize_bits -
								     UNFM_P_SHIFT);
							}
							/* ... */
							/* k_offset (B_N_PKEY(tb->R[0],0)) += n_rem;
							   k_offset (B_N_PDELIM_KEY(tb->CFR[0],tb->rkey[0])) += n_rem; */
							do_balance_mark_internal_dirty
							    (tb, tb->CFR[0], 0);
						}

						/* Append part of body into R[0] */
						bi.tb = tb;
						bi.bi_bh = tb->R[0];
						bi.bi_parent = tb->FR[0];
						bi.bi_position =
						    get_right_neighbor_position
						    (tb, 0);
						if (n_rem > zeros_num) {
							r_zeros_number = 0;
							r_body =
							    body + n_rem -
							    zeros_num;
						} else {
							r_body = body;
							r_zeros_number =
							    zeros_num - n_rem;
							zeros_num -=
							    r_zeros_number;
						}

						leaf_paste_in_buffer(&bi, 0,
								     n_shift,
								     tb->insert_size[0] -
								     n_rem,
								     r_body,
								     r_zeros_number);

						if (is_indirect_le_ih
						    (B_N_PITEM_HEAD(tb->R[0], 0))) {
							RFALSE(n_rem,
							       "PAP-12160: paste more than one unformatted node pointer");
							/* ... */
						}
						tb->insert_size[0] = n_rem;
					}
				} else {	/* pasted item in whole falls into R[0] */

					struct item_head *pasted;

					ret_val =
					    leaf_shift_right(tb, tb->rnum[0],
							     tb->rbytes);
					/* append item in R[0] */
					if (pos_in_item >= 0) {
						bi.tb = tb;
						bi.bi_bh = tb->R[0];
						bi.bi_parent = tb->FR[0];
						bi.bi_position =
						    get_right_neighbor_position
						    (tb, 0);
						leaf_paste_in_buffer(&bi,
								     item_pos - n +
								     tb->rnum[0],
								     pos_in_item,
								     tb->insert_size[0],
								     body, zeros_num);
					}

					/* paste new entry, if item is directory item */
					pasted =
					    B_N_PITEM_HEAD(tb->R[0],
							   item_pos - n +
							   tb->rnum[0]);
					if (is_direntry_le_ih(pasted)
					    && pos_in_item >= 0) {
						leaf_paste_entries(bi.bi_bh,
								   item_pos - n +
								   tb->rnum[0],
								   pos_in_item, 1,
								   (struct reiserfs_de_head *)body,
								   body + DEH_SIZE,
								   tb->insert_size[0]);
						if (!pos_in_item) {

							RFALSE(item_pos - n +
							       tb->rnum[0],
							       "PAP-12165: directory item must be first item of node when pasting is in 0th position");

							/* update delimiting keys */
							replace_key(tb,
								    tb->CFR[0],
								    tb->rkey[0],
								    tb->R[0], 0);
						}
					}

					if (is_indirect_le_ih(pasted))
						set_ih_free_space(pasted, 0);
					zeros_num = tb->insert_size[0] = 0;
				}
			} else {	/* new item doesn't fall into R[0] */

				leaf_shift_right(tb, tb->rnum[0], tb->rbytes);
			}
			break;

		default:	/* cases d and t */
			reiserfs_panic(tb->tb_sb,
				       "PAP-12175: balance_leaf: rnum > 0: unexpectable mode: %s(%d)",
				       (flag ==
					M_DELETE) ? "DELETE" : ((flag ==
								 M_CUT) ? "CUT" : "UNKNOWN"),
				       flag);
		}
	}			/* tb->rnum[0] > 0 */

	RFALSE(tb->blknum[0] > 3,
	       "PAP-12180: blknum can not be %d. It must be <= 3",
	       tb->blknum[0]);
	RFALSE(tb->blknum[0] < 0,
	       "PAP-12185: blknum can not be %d. It must be >= 0",
	       tb->blknum[0]);

	/* if while adding to a node we discover that it is possible to split
	   it in two, and merge the left part into the left neighbor and the
	   right part into the right neighbor, eliminating the node */
	if (tb->blknum[0] == 0) {	/* node S[0] is empty now */

		RFALSE(!tb->lnum[0] || !tb->rnum[0],
		       "PAP-12190: lnum and rnum must not be zero");
		/* if insertion was done before 0-th position in R[0], right
		   delimiting key of the tb->L[0]'s and left delimiting key are
		   not set correctly */
		if (tb->CFL[0]) {
			if (!tb->CFR[0])
				reiserfs_panic(tb->tb_sb,
					       "vs-12195: balance_leaf: CFR not initialized");
			copy_key(B_N_PDELIM_KEY(tb->CFL[0], tb->lkey[0]),
				 B_N_PDELIM_KEY(tb->CFR[0], tb->rkey[0]));
			do_balance_mark_internal_dirty(tb, tb->CFL[0], 0);
		}

		reiserfs_invalidate_buffer(tb, tbS0);
		return 0;
	}
	/* Fill new nodes that appear in place of S[0] */

	/* I am told that this copying is because we need an array to enable
	   the looping code. -Hans */
	snum[0] = tb->s1num, snum[1] = tb->s2num;
	sbytes[0] = tb->s1bytes;
	sbytes[1] = tb->s2bytes;
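	/* Editor's note: blknum[0] is the number of nodes that must exist at
	   this level after balancing (S[0] plus up to two new nodes), so the
	   loop below runs blknum[0] - 1 times and walks S_new[] from the
	   rightmost new node down. */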
	for (i = tb->blknum[0] - 2; i >= 0; i--) {

		RFALSE(!snum[i], "PAP-12200: snum[%d] == %d. Must be > 0", i,
		       snum[i]);

		/* here we shift from S to S_new nodes */

		S_new[i] = get_FEB(tb);

		/* initialized block type and tree level */
		set_blkh_level(B_BLK_HEAD(S_new[i]), DISK_LEAF_NODE_LEVEL);
		n = B_NR_ITEMS(tbS0);

		switch (flag) {
		case M_INSERT:	/* insert item */
			if (n - snum[i] < item_pos) {	/* new item or its part falls to first new node S_new[i] */
				if (item_pos == n - snum[i] + 1 && sbytes[i] != -1) {	/* part of new item falls into S_new[i] */
					int old_key_comp, old_len,
					    r_zeros_number;
					const char *r_body;
					int version;

					/* Move snum[i]-1 items from S[0] to S_new[i] */
					leaf_move_items(LEAF_FROM_S_TO_SNEW, tb,
							snum[i] - 1, -1,
							S_new[i]);
					/* Remember key component and item length */
					version = ih_version(ih);
					old_key_comp = le_ih_k_offset(ih);
					old_len = ih_item_len(ih);

					/* Calculate key component and item length to insert into S_new[i] */
					set_le_ih_k_offset(ih,
							   le_ih_k_offset(ih) +
							   ((old_len -
							     sbytes[i]) <<
							    (is_indirect_le_ih(ih) ?
							     tb->tb_sb->s_blocksize_bits -
							     UNFM_P_SHIFT : 0)));

					put_ih_item_len(ih, sbytes[i]);

					/* Insert part of the item into S_new[i] before 0-th item */
					bi.tb = tb;
					bi.bi_bh = S_new[i];
					bi.bi_parent = NULL;
					bi.bi_position = 0;

					if ((old_len - sbytes[i]) > zeros_num) {
						r_zeros_number = 0;
						r_body =
						    body + (old_len -
							    sbytes[i]) -
						    zeros_num;
					} else {
						r_body = body;
						r_zeros_number =
						    zeros_num - (old_len -
								 sbytes[i]);
						zeros_num -= r_zeros_number;
					}

					leaf_insert_into_buf(&bi, 0, ih, r_body,
							     r_zeros_number);

					/* Calculate key component and item length to insert into S[i] */
					set_le_ih_k_offset(ih, old_key_comp);
					put_ih_item_len(ih,
							old_len - sbytes[i]);
					tb->insert_size[0] -= sbytes[i];
				} else {	/* whole new item falls into S_new[i] */

					/* Shift snum[0] - 1 items to S_new[i] (sbytes[i] of split item) */
					leaf_move_items(LEAF_FROM_S_TO_SNEW, tb,
							snum[i] - 1, sbytes[i],
							S_new[i]);

					/* Insert new item into S_new[i] */
					bi.tb = tb;
					bi.bi_bh = S_new[i];
					bi.bi_parent = NULL;
					bi.bi_position = 0;
					leaf_insert_into_buf(&bi,
							     item_pos - n +
							     snum[i] - 1, ih,
							     body, zeros_num);

					zeros_num = tb->insert_size[0] = 0;
				}
			}

			else {	/* new item or its part doesn't fall into S_new[i] */

				leaf_move_items(LEAF_FROM_S_TO_SNEW, tb,
						snum[i], sbytes[i], S_new[i]);
			}
			break;
		case M_PASTE:	/* append item */

			if (n - snum[i] <= item_pos) {	/* pasted item or part of it falls to S_new[i] */
				if (item_pos == n - snum[i] && sbytes[i] != -1) {	/* we must shift part of the appended item */
					struct item_head *aux_ih;

					RFALSE(ih, "PAP-12210: ih must be 0");

					if (is_direntry_le_ih
					    (aux_ih =
					     B_N_PITEM_HEAD(tbS0, item_pos))) {
						/* we append to directory item */

						int entry_count;

						entry_count =
						    ih_entry_count(aux_ih);

						if (entry_count - sbytes[i] <
						    pos_in_item
						    && pos_in_item <=
						    entry_count) {
							/* new directory entry falls into S_new[i] */

							RFALSE(!tb->insert_size[0],
							       "PAP-12215: insert_size is already 0");
							RFALSE(sbytes[i] - 1 >=
							       entry_count,
							       "PAP-12220: there are no so much entries (%d), only %d",
							       sbytes[i] - 1,
							       entry_count);

							/* Shift snum[i]-1 items in whole. Shift sbytes[i] directory entries from directory item number snum[i] */
							leaf_move_items
							    (LEAF_FROM_S_TO_SNEW,
							     tb, snum[i],
							     sbytes[i] - 1,
							     S_new[i]);
							/* Paste given directory entry to directory item */
							bi.tb = tb;
							bi.bi_bh = S_new[i];
							bi.bi_parent = NULL;
							bi.bi_position = 0;
							leaf_paste_in_buffer
							    (&bi, 0,
							     pos_in_item -
							     entry_count +
							     sbytes[i] - 1,
							     tb->insert_size[0],
							     body, zeros_num);
							/* paste new directory entry */
							leaf_paste_entries(bi.bi_bh,
									   0,
									   pos_in_item -
									   entry_count +
									   sbytes[i] - 1,
									   1,
									   (struct reiserfs_de_head *)body,
									   body + DEH_SIZE,
									   tb->insert_size[0]);
							tb->insert_size[0] = 0;
						} else {	/* new directory entry doesn't fall into S_new[i] */
							leaf_move_items
							    (LEAF_FROM_S_TO_SNEW,
							     tb, snum[i],
							     sbytes[i],
							     S_new[i]);
						}
					} else {	/* regular object */

						int n_shift, n_rem,
						    r_zeros_number;
						const char *r_body;

						RFALSE(pos_in_item !=
						       ih_item_len(B_N_PITEM_HEAD
								   (tbS0, item_pos))
						       || tb->insert_size[0] <=
						       0,
						       "PAP-12225: item too short or insert_size <= 0");

						/* Calculate number of bytes which must be shifted from appended item */
						n_shift =
						    sbytes[i] -
						    tb->insert_size[0];
						if (n_shift < 0)
							n_shift = 0;
						leaf_move_items
						    (LEAF_FROM_S_TO_SNEW, tb,
						     snum[i], n_shift,
						     S_new[i]);

						/* Calculate number of bytes which must remain in body after append to S_new[i] */
						n_rem =
						    tb->insert_size[0] -
						    sbytes[i];
						if (n_rem < 0)
							n_rem = 0;
						/* Append part of body into S_new[0] */
						bi.tb = tb;
						bi.bi_bh = S_new[i];
						bi.bi_parent = NULL;
						bi.bi_position = 0;

						if (n_rem > zeros_num) {
							r_zeros_number = 0;
							r_body =
							    body + n_rem -
							    zeros_num;
						} else {
							r_body = body;
							r_zeros_number =
							    zeros_num - n_rem;
							zeros_num -=
							    r_zeros_number;
						}

						leaf_paste_in_buffer(&bi, 0,
								     n_shift,
								     tb->insert_size[0] -
								     n_rem,
								     r_body,
								     r_zeros_number);

						{
							struct item_head *tmp;

							tmp =
							    B_N_PITEM_HEAD(S_new[i], 0);
							if (is_indirect_le_ih
							    (tmp)) {
								/* ... */
							}
							/* ... */
						}

						tb->insert_size[0] = n_rem;
					}
				} else {	/* item falls wholly into S_new[i] */

					struct item_head *pasted;

#ifdef CONFIG_REISERFS_CHECK
					struct item_head *ih =
					    B_N_PITEM_HEAD(tbS0, item_pos);

					if (!is_direntry_le_ih(ih)
					    && (pos_in_item != ih_item_len(ih)
						|| tb->insert_size[0] <= 0))
						reiserfs_panic(tb->tb_sb,
							       "PAP-12235: balance_leaf: pos_in_item must be equal to ih_item_len");
#endif				/* CONFIG_REISERFS_CHECK */

					ret_val =
					    leaf_move_items(LEAF_FROM_S_TO_SNEW,
							    tb, snum[i],
							    sbytes[i],
							    S_new[i]);

					RFALSE(ret_val,
					       "PAP-12240: unexpected value returned by leaf_move_items (%d)",
					       ret_val);

					/* paste into item */
					bi.tb = tb;
					bi.bi_bh = S_new[i];
					bi.bi_parent = NULL;
					bi.bi_position = 0;
					leaf_paste_in_buffer(&bi,
							     item_pos - n +
							     snum[i],
							     pos_in_item,
							     tb->insert_size[0],
							     body, zeros_num);

					pasted =
					    B_N_PITEM_HEAD(S_new[i],
							   item_pos - n +
							   snum[i]);
					if (is_direntry_le_ih(pasted)) {
						leaf_paste_entries(bi.bi_bh,
								   item_pos -
								   n + snum[i],
								   pos_in_item,
								   1,
								   (struct reiserfs_de_head *)body,
								   body + DEH_SIZE,
								   tb->insert_size[0]);
					}

					/* if we paste to indirect item update ih_free_space */
					if (is_indirect_le_ih(pasted))
						set_ih_free_space(pasted, 0);
					zeros_num = tb->insert_size[0] = 0;
				}
			}
			else {	/* pasted item doesn't fall into S_new[i] */

				leaf_move_items(LEAF_FROM_S_TO_SNEW, tb,
						snum[i], sbytes[i], S_new[i]);
			}
			break;

		default:	/* cases d and t */
			reiserfs_panic(tb->tb_sb,
				       "PAP-12245: balance_leaf: blknum > 2: unexpectable mode: %s(%d)",
				       (flag ==
					M_DELETE) ? "DELETE" : ((flag ==
								 M_CUT) ? "CUT" : "UNKNOWN"),
				       flag);
		}
		memcpy(insert_key + i, B_N_PKEY(S_new[i], 0), KEY_SIZE);
		insert_ptr[i] = S_new[i];

		RFALSE(!buffer_journaled(S_new[i])
		       || buffer_journal_dirty(S_new[i])
		       || buffer_dirty(S_new[i]),
		       "PAP-12247: S_new[%d] : (%b)", i, S_new[i]);
	}
	/* if the affected item was not wholly shifted then we perform all
	   necessary operations on that part or whole of the affected item which
	   remains in S */
	if (0 <= item_pos && item_pos < tb->s0num) {	/* if we must insert or append into buffer S[0] */

		switch (flag) {
		case M_INSERT:	/* insert item into S[0] */
			bi.tb = tb;
			bi.bi_bh = tbS0;
			bi.bi_parent = PATH_H_PPARENT(tb->tb_path, 0);
			bi.bi_position = PATH_H_POSITION(tb->tb_path, 1);
			leaf_insert_into_buf(&bi, item_pos, ih, body,
					     zeros_num);

			/* If we insert the first key change the delimiting key */
			if (item_pos == 0) {
				if (tb->CFL[0])	/* can be 0 in reiserfsck */
					replace_key(tb, tb->CFL[0], tb->lkey[0],
						    tbS0, 0);
			}
			break;
		case M_PASTE:{	/* append item in S[0] */
				struct item_head *pasted;

				pasted = B_N_PITEM_HEAD(tbS0, item_pos);
				/* when directory, may be new entry already pasted */
				if (is_direntry_le_ih(pasted)) {
					if (pos_in_item >= 0 &&
					    pos_in_item <=
					    ih_entry_count(pasted)) {

						RFALSE(!tb->insert_size[0],
						       "PAP-12260: insert_size is 0 already");

						/* prepare space */
						bi.tb = tb;
						bi.bi_bh = tbS0;
						bi.bi_parent =
						    PATH_H_PPARENT(tb->tb_path,
								   0);
						bi.bi_position =
						    PATH_H_POSITION(tb->tb_path,
								    1);
						leaf_paste_in_buffer(&bi,
								     item_pos,
								     pos_in_item,
								     tb->insert_size[0],
								     body,
								     zeros_num);

						/* paste entry */
						leaf_paste_entries(bi.bi_bh,
								   item_pos,
								   pos_in_item,
								   1,
								   (struct reiserfs_de_head *)body,
								   body + DEH_SIZE,
								   tb->insert_size[0]);
						if (!item_pos && !pos_in_item) {
							RFALSE(!tb->CFL[0]
							       || !tb->L[0],
							       "PAP-12270: CFL[0]/L[0] must be specified");
							if (tb->CFL[0])
								replace_key(tb,
									    tb->CFL[0],
									    tb->lkey[0],
									    tbS0, 0);
						}
						tb->insert_size[0] = 0;
					}
				} else {	/* regular object */
					if (pos_in_item == ih_item_len(pasted)) {

						RFALSE(tb->insert_size[0] <= 0,
						       "PAP-12275: insert size must not be %d",
						       tb->insert_size[0]);
						bi.tb = tb;
						bi.bi_bh = tbS0;
						bi.bi_parent =
						    PATH_H_PPARENT(tb->tb_path,
								   0);
						bi.bi_position =
						    PATH_H_POSITION(tb->tb_path,
								    1);
						leaf_paste_in_buffer(&bi,
								     item_pos,
								     pos_in_item,
								     tb->insert_size[0],
								     body,
								     zeros_num);

						if (is_indirect_le_ih(pasted)) {
							RFALSE(tb->insert_size[0] != UNFM_P_SIZE,
							       "PAP-12280: insert_size for indirect item must be %d, not %d",
							       UNFM_P_SIZE,
							       tb->insert_size[0]);
							set_ih_free_space(pasted, 0);
						}
						tb->insert_size[0] = 0;
					}
#ifdef CONFIG_REISERFS_CHECK
					else {
						if (tb->insert_size[0]) {
							print_cur_tb("12285");
							reiserfs_panic(tb->tb_sb,
								       "PAP-12285: balance_leaf: insert_size must be 0 (%d)",
								       tb->insert_size[0]);
						}
					}
#endif				/* CONFIG_REISERFS_CHECK */
				}
			}	/* case M_PASTE: */
			break;
		}
	}

#ifdef CONFIG_REISERFS_CHECK
	if (flag == M_PASTE && tb->insert_size[0]) {
		print_cur_tb("12290");
		reiserfs_panic(tb->tb_sb,
			       "PAP-12290: balance_leaf: insert_size is still not 0 (%d)",
			       tb->insert_size[0]);
	}
#endif				/* CONFIG_REISERFS_CHECK */

	return 0;
}				/* Leaf level of the tree is balanced (end of balance_leaf) */
/* Make empty node */
void make_empty_node(struct buffer_info *bi)
{
	struct block_head *blkh;

	RFALSE(bi->bi_bh == NULL, "PAP-12295: pointer to the buffer is NULL");

	blkh = B_BLK_HEAD(bi->bi_bh);
	set_blkh_nr_item(blkh, 0);
	set_blkh_free_space(blkh, MAX_CHILD_SIZE(bi->bi_bh));

	if (bi->bi_parent)
		B_N_CHILD(bi->bi_parent, bi->bi_position)->dc_size = 0;	/* Endian safe if 0 */
}
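/* Editor's note: struct buffer_info bundles a buffer with its parent and
   the child position inside that parent (bi_bh, bi_parent, bi_position),
   so helpers like make_empty_node() can update the parent's disk_child
   entry without re-searching the tree.  A NULL bi_parent, as get_FEB()
   sets below, means the node is not linked into the tree yet. */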
/* Get first empty buffer */
struct buffer_head *get_FEB(struct tree_balance *tb)
{
	int i;
	struct buffer_head *first_b;
	struct buffer_info bi;

	for (i = 0; i < MAX_FEB_SIZE; i++)
		if (tb->FEB[i] != 0)
			break;

	if (i == MAX_FEB_SIZE)
		reiserfs_panic(tb->tb_sb,
			       "vs-12300: get_FEB: FEB list is empty");

	bi.tb = tb;
	bi.bi_bh = first_b = tb->FEB[i];
	bi.bi_parent = NULL;
	bi.bi_position = 0;
	make_empty_node(&bi);
	set_buffer_uptodate(first_b);
	tb->FEB[i] = NULL;
	tb->used[i] = first_b;

	return first_b;
}
/* This is now used because reiserfs_free_block has to be able to
** schedule.
*/
static void store_thrown(struct tree_balance *tb, struct buffer_head *bh)
{
	int i;

	if (buffer_dirty(bh))
		reiserfs_warning(tb->tb_sb,
				 "store_thrown deals with dirty buffer");
	for (i = 0; i < sizeof(tb->thrown) / sizeof(tb->thrown[0]); i++)
		if (!tb->thrown[i]) {
			tb->thrown[i] = bh;
			get_bh(bh);	/* free_thrown puts this */
			return;
		}
	reiserfs_warning(tb->tb_sb, "store_thrown: too many thrown buffers");
}
static void free_thrown(struct tree_balance *tb)
{
	int i;
	b_blocknr_t blocknr;

	for (i = 0; i < sizeof(tb->thrown) / sizeof(tb->thrown[0]); i++) {
		if (tb->thrown[i]) {
			blocknr = tb->thrown[i]->b_blocknr;
			if (buffer_dirty(tb->thrown[i]))
				reiserfs_warning(tb->tb_sb,
						 "free_thrown deals with dirty buffer %d",
						 blocknr);
			brelse(tb->thrown[i]);	/* incremented in store_thrown */
			reiserfs_free_block(tb->transaction_handle, NULL,
					    blocknr, 0);
		}
	}
}
void reiserfs_invalidate_buffer(struct tree_balance *tb, struct buffer_head *bh)
{
	struct block_head *blkh;

	blkh = B_BLK_HEAD(bh);
	set_blkh_level(blkh, FREE_LEVEL);
	set_blkh_nr_item(blkh, 0);

	clear_buffer_dirty(bh);
	store_thrown(tb, bh);
}
/* Replace n_dest'th key in buffer dest by n_src'th key of buffer src.*/
void replace_key(struct tree_balance *tb, struct buffer_head *dest, int n_dest,
		 struct buffer_head *src, int n_src)
{

	RFALSE(dest == NULL || src == NULL,
	       "vs-12305: source or destination buffer is 0 (src=%p, dest=%p)",
	       src, dest);
	RFALSE(!B_IS_KEYS_LEVEL(dest),
	       "vs-12310: invalid level (%z) for destination buffer. dest must be leaf",
	       dest);
	RFALSE(n_dest < 0 || n_src < 0,
	       "vs-12315: src(%d) or dest(%d) key number < 0", n_src, n_dest);
	RFALSE(n_dest >= B_NR_ITEMS(dest) || n_src >= B_NR_ITEMS(src),
	       "vs-12320: src(%d(%d)) or dest(%d(%d)) key number is too big",
	       n_src, B_NR_ITEMS(src), n_dest, B_NR_ITEMS(dest));

	if (B_IS_ITEMS_LEVEL(src))
		/* source buffer contains leaf node */
		memcpy(B_N_PDELIM_KEY(dest, n_dest),
		       B_N_PITEM_HEAD(src, n_src), KEY_SIZE);
	else
		memcpy(B_N_PDELIM_KEY(dest, n_dest),
		       B_N_PDELIM_KEY(src, n_src), KEY_SIZE);

	do_balance_mark_internal_dirty(tb, dest, 0);
}
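/* Editor's note: the leaf branch may memcpy from B_N_PITEM_HEAD() because
   a struct item_head begins with its key; only KEY_SIZE bytes are copied
   either way, so both branches transfer exactly one key. */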
int get_left_neighbor_position(struct tree_balance *tb, int h)
{
	int Sh_position = PATH_H_POSITION(tb->tb_path, h + 1);

	RFALSE(PATH_H_PPARENT(tb->tb_path, h) == 0 || tb->FL[h] == 0,
	       "vs-12325: FL[%d](%p) or F[%d](%p) does not exist",
	       h, tb->FL[h], h, PATH_H_PPARENT(tb->tb_path, h));

	if (Sh_position == 0)
		return B_NR_ITEMS(tb->FL[h]);
	else
		return Sh_position - 1;
}
int get_right_neighbor_position(struct tree_balance *tb, int h)
{
	int Sh_position = PATH_H_POSITION(tb->tb_path, h + 1);

	RFALSE(PATH_H_PPARENT(tb->tb_path, h) == 0 || tb->FR[h] == 0,
	       "vs-12330: F[%d](%p) or FR[%d](%p) does not exist",
	       h, PATH_H_PPARENT(tb->tb_path, h), h, tb->FR[h]);

	if (Sh_position == B_NR_ITEMS(PATH_H_PPARENT(tb->tb_path, h)))
		return 0;
	else
		return Sh_position + 1;
}
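/* Editor's note on the two helpers above: positions are child positions in
   the parent on the path.  If S[h] is child k, its left neighbor is child
   k - 1 and its right neighbor child k + 1; at the edges (k == 0 or
   k == B_NR_ITEMS(parent)) the neighbor hangs off the common father
   FL[h]/FR[h] instead, which is what the special-cased returns express. */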
#ifdef CONFIG_REISERFS_CHECK

int is_reusable(struct super_block *s, b_blocknr_t block, int bit_value);

static void check_internal_node(struct super_block *s, struct buffer_head *bh,
				char *mes)
{
	struct disk_child *dc;
	int i;

	RFALSE(!bh, "PAP-12336: bh == 0");

	if (!bh || !B_IS_IN_TREE(bh))
		return;

	RFALSE(!buffer_dirty(bh) &&
	       !(buffer_journaled(bh) || buffer_journal_dirty(bh)),
	       "PAP-12337: buffer (%b) must be dirty", bh);
	dc = B_N_CHILD(bh, 0);

	for (i = 0; i <= B_NR_ITEMS(bh); i++, dc++) {
		if (!is_reusable(s, dc_block_number(dc), 1)) {
			print_cur_tb(mes);
			reiserfs_panic(s,
				       "PAP-12338: check_internal_node: invalid child pointer %y in %b",
				       dc, bh);
		}
	}
}
static int locked_or_not_in_tree(struct buffer_head *bh, char *which)
{
	if ((!buffer_journal_prepared(bh) && buffer_locked(bh)) ||
	    !B_IS_IN_TREE(bh)) {
		reiserfs_warning(NULL,
				 "vs-12339: locked_or_not_in_tree: %s (%b)",
				 which, bh);
		return 1;
	}
	return 0;
}
static int check_before_balancing(struct tree_balance *tb)
{
	int retval = 0;

	if (cur_tb) {
		reiserfs_panic(tb->tb_sb, "vs-12335: check_before_balancing: "
			       "suspect that schedule occurred based on cur_tb not being null at this point in code. "
			       "do_balance cannot properly handle schedule occurring while it runs.");
	}

	/* double check that buffers that we will modify are unlocked. (fix_nodes should already have
	   prepped all of these for us). */
	if (tb->lnum[0]) {
		retval |= locked_or_not_in_tree(tb->L[0], "L[0]");
		retval |= locked_or_not_in_tree(tb->FL[0], "FL[0]");
		retval |= locked_or_not_in_tree(tb->CFL[0], "CFL[0]");
		check_leaf(tb->L[0]);
	}
	if (tb->rnum[0]) {
		retval |= locked_or_not_in_tree(tb->R[0], "R[0]");
		retval |= locked_or_not_in_tree(tb->FR[0], "FR[0]");
		retval |= locked_or_not_in_tree(tb->CFR[0], "CFR[0]");
		check_leaf(tb->R[0]);
	}
	retval |= locked_or_not_in_tree(PATH_PLAST_BUFFER(tb->tb_path), "S[0]");
	check_leaf(PATH_PLAST_BUFFER(tb->tb_path));

	return retval;
}
static void check_after_balance_leaf(struct tree_balance *tb)
{
	if (tb->lnum[0]) {
		if (B_FREE_SPACE(tb->L[0]) !=
		    MAX_CHILD_SIZE(tb->L[0]) -
		    dc_size(B_N_CHILD
			    (tb->FL[0], get_left_neighbor_position(tb, 0)))) {
			print_cur_tb("12221");
			reiserfs_panic(tb->tb_sb,
				       "PAP-12355: check_after_balance_leaf: shift to left was incorrect");
		}
	}
	if (tb->rnum[0]) {
		if (B_FREE_SPACE(tb->R[0]) !=
		    MAX_CHILD_SIZE(tb->R[0]) -
		    dc_size(B_N_CHILD
			    (tb->FR[0], get_right_neighbor_position(tb, 0)))) {
			print_cur_tb("12222");
			reiserfs_panic(tb->tb_sb,
				       "PAP-12360: check_after_balance_leaf: shift to right was incorrect");
		}
	}
	if (PATH_H_PBUFFER(tb->tb_path, 1) &&
	    (B_FREE_SPACE(PATH_H_PBUFFER(tb->tb_path, 0)) !=
	     (MAX_CHILD_SIZE(PATH_H_PBUFFER(tb->tb_path, 0)) -
	      dc_size(B_N_CHILD(PATH_H_PBUFFER(tb->tb_path, 1),
				PATH_H_POSITION(tb->tb_path, 1)))))) {
		int left = B_FREE_SPACE(PATH_H_PBUFFER(tb->tb_path, 0));
		int right = (MAX_CHILD_SIZE(PATH_H_PBUFFER(tb->tb_path, 0)) -
			     dc_size(B_N_CHILD(PATH_H_PBUFFER(tb->tb_path, 1),
					       PATH_H_POSITION(tb->tb_path,
							       1))));
		print_cur_tb("12223");
		reiserfs_warning(tb->tb_sb,
				 "B_FREE_SPACE (PATH_H_PBUFFER(tb->tb_path,0)) = %d; "
				 "MAX_CHILD_SIZE (%d) - dc_size( %y, %d ) [%d] = %d",
				 left,
				 MAX_CHILD_SIZE(PATH_H_PBUFFER(tb->tb_path, 0)),
				 PATH_H_PBUFFER(tb->tb_path, 1),
				 PATH_H_POSITION(tb->tb_path, 1),
				 dc_size(B_N_CHILD
					 (PATH_H_PBUFFER(tb->tb_path, 1),
					  PATH_H_POSITION(tb->tb_path, 1))),
				 right);
		reiserfs_panic(tb->tb_sb,
			       "PAP-12365: check_after_balance_leaf: S is incorrect");
	}
}
static void check_leaf_level(struct tree_balance *tb)
{
	check_leaf(tb->L[0]);
	check_leaf(tb->R[0]);
	check_leaf(PATH_PLAST_BUFFER(tb->tb_path));
}
static void check_internal_levels(struct tree_balance *tb)
{
	int h;

	/* check all internal nodes */
	for (h = 1; tb->insert_size[h]; h++) {
		check_internal_node(tb->tb_sb, PATH_H_PBUFFER(tb->tb_path, h),
				    "BAD BUFFER ON PATH");
		if (tb->lnum[h])
			check_internal_node(tb->tb_sb, tb->L[h], "BAD L");
		if (tb->rnum[h])
			check_internal_node(tb->tb_sb, tb->R[h], "BAD R");
	}
}

#endif
/* Now we have all of the buffers that must be used in balancing of
   the tree.  We rely on the assumption that schedule() will not occur
   while do_balance works. ( Only interrupt handlers are acceptable.)
   We balance the tree according to the analysis made before this,
   using buffers already obtained.  For SMP support it will someday be
   necessary to add ordered locking of tb. */

/* Some interesting rules of balancing:

   we delete a maximum of two nodes per level per balancing: we never
   delete R, when we delete two of three nodes L, S, R then we move
   them into L.

   we only delete L if we are deleting two nodes, if we delete only
   one node we delete S

   if we shift leaves then we shift as much as we can: this is a
   deliberate policy of extremism in node packing which results in
   higher average utilization after repeated random balance operations
   at the cost of more memory copies and more balancing as a result of
   small insertions to full nodes.

   if we shift internal nodes we try to evenly balance the node
   utilization, with consequent less balancing at the cost of lower
   utilization.

   one could argue that the policy for directories in leaves should be
   that of internal nodes, but we will wait until another day to
   evaluate this....  It would be nice to someday measure and prove
   these assumptions as to what is optimal....
*/
static inline void do_balance_starts(struct tree_balance *tb)
{
	/* use print_cur_tb() to see initial state of struct
	   tree_balance */

	/* store_print_tb (tb); */

	/* do not delete, just comment it out */
	/* print_tb(flag, PATH_LAST_POSITION(tb->tb_path), tb->tb_path->pos_in_item, tb,
	   "check"); */
	RFALSE(check_before_balancing(tb), "PAP-12340: locked buffers in TB");
#ifdef CONFIG_REISERFS_CHECK
	cur_tb = tb;
#endif
}
static inline void do_balance_completed(struct tree_balance *tb)
{

#ifdef CONFIG_REISERFS_CHECK
	check_leaf_level(tb);
	check_internal_levels(tb);
	cur_tb = NULL;
#endif
	/* reiserfs_free_block is no longer schedule safe.  So, we need to
	 ** put the buffers we want freed on the thrown list during do_balance,
	 ** and then free them now
	 */

	REISERFS_SB(tb->tb_sb)->s_do_balance++;

	/* release all nodes hold to perform the balancing */
	unfix_nodes(tb);

	free_thrown(tb);
}
void do_balance(struct tree_balance *tb,	/* tree_balance structure */
		struct item_head *ih,	/* item header of inserted item */
		const char *body,	/* body of inserted item or bytes to paste */
		int flag)
{				/* i - insert, d - delete
				   c - cut, p - paste

				   Cut means delete part of an item
				   (includes removing an entry from a
				   directory).

				   Delete means delete whole item.

				   Insert means add a new item into the
				   tree.

				   Paste means to append to the end of an
				   existing file or to insert a directory
				   entry.  */
	int child_pos,		/* position of a child node in its parent */
	 h;			/* level of the tree being processed */
	struct item_head insert_key[2];	/* in our processing of one level
					   we sometimes determine what
					   must be inserted into the next
					   higher level.  This insertion
					   consists of a key or two keys
					   and their corresponding
					   pointers */
	struct buffer_head *insert_ptr[2];	/* inserted node-ptrs for the next
						   level */
	tb->tb_mode = flag;
	tb->need_balance_dirty = 0;

	if (FILESYSTEM_CHANGED_TB(tb)) {
		reiserfs_panic(tb->tb_sb,
			       "clm-6000: do_balance, fs generation has changed\n");
	}
	/* if we have no real work to do */
	if (!tb->insert_size[0]) {
		reiserfs_warning(tb->tb_sb,
				 "PAP-12350: do_balance: insert_size == 0, mode == %c",
				 flag);
		unfix_nodes(tb);
		return;
	}
	atomic_inc(&(fs_generation(tb->tb_sb)));
	do_balance_starts(tb);

	/* balance leaf returns 0 except if combining L R and S into
	   one node.  see balance_internal() for explanation of this
	   line of code. */
	child_pos = PATH_H_B_ITEM_ORDER(tb->tb_path, 0) +
	    balance_leaf(tb, ih, body, flag, insert_key, insert_ptr);

#ifdef CONFIG_REISERFS_CHECK
	check_after_balance_leaf(tb);
#endif

	/* Balance internal level of the tree. */
	for (h = 1; h < MAX_HEIGHT && tb->insert_size[h]; h++)
		child_pos =
		    balance_internal(tb, h, child_pos, insert_key, insert_ptr);

	do_balance_completed(tb);
}