2 * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
5 /* Now we have all buffers that must be used in balancing of the tree */
6 /* Further calculations can not cause schedule(), and thus the buffer */
7 /* tree will be stable until the balancing will be finished */
8 /* balance the tree according to the analysis made before, */
9 /* and using buffers obtained after all above. */
12 ** balance_leaf_when_delete
18 #include <asm/uaccess.h>
19 #include <linux/time.h>
20 #include <linux/reiserfs_fs.h>
21 #include <linux/buffer_head.h>
22 #include <linux/kernel.h>
24 #ifdef CONFIG_REISERFS_CHECK
26 struct tree_balance
*cur_tb
= NULL
; /* detects whether more than one
27 copy of tb exists as a means
28 of checking whether schedule
29 is interrupting do_balance */
32 static inline void buffer_info_init_left(struct tree_balance
*tb
,
33 struct buffer_info
*bi
)
37 bi
->bi_parent
= tb
->FL
[0];
38 bi
->bi_position
= get_left_neighbor_position(tb
, 0);
41 static inline void buffer_info_init_right(struct tree_balance
*tb
,
42 struct buffer_info
*bi
)
46 bi
->bi_parent
= tb
->FR
[0];
47 bi
->bi_position
= get_right_neighbor_position(tb
, 0);
50 static inline void buffer_info_init_tbS0(struct tree_balance
*tb
,
51 struct buffer_info
*bi
)
54 bi
->bi_bh
= PATH_PLAST_BUFFER(tb
->tb_path
);
55 bi
->bi_parent
= PATH_H_PPARENT(tb
->tb_path
, 0);
56 bi
->bi_position
= PATH_H_POSITION(tb
->tb_path
, 1);
59 static inline void buffer_info_init_bh(struct tree_balance
*tb
,
60 struct buffer_info
*bi
,
61 struct buffer_head
*bh
)
69 inline void do_balance_mark_leaf_dirty(struct tree_balance
*tb
,
70 struct buffer_head
*bh
, int flag
)
72 journal_mark_dirty(tb
->transaction_handle
,
73 tb
->transaction_handle
->t_super
, bh
);
76 #define do_balance_mark_internal_dirty do_balance_mark_leaf_dirty
77 #define do_balance_mark_sb_dirty do_balance_mark_leaf_dirty
80 if deleting something ( tb->insert_size[0] < 0 )
81 return(balance_leaf_when_delete()); (flag d handled here)
83 if lnum is larger than 0 we put items into the left node
84 if rnum is larger than 0 we put items into the right node
85 if snum1 is larger than 0 we put items into the new node s1
86 if snum2 is larger than 0 we put items into the new node s2
87 Note that all *num* count new items being created.
89 It would be easier to read balance_leaf() if each of these summary
90 lines was a separate procedure rather than being inlined. I think
91 that there are many passages here and in balance_leaf_when_delete() in
92 which two calls to one procedure can replace two passages, and it
93 might save cache space and improve software maintenance costs to do so.
95 Vladimir made the perceptive comment that we should offload most of
96 the decision making in this function into fix_nodes/check_balance, and
97 then create some sort of structure in tb that says what actions should
98 be performed by do_balance.
102 /* Balance leaf node in case of delete or cut: insert_size[0] < 0
104 * lnum, rnum can have values >= -1
105 * -1 means that the neighbor must be joined with S
106 * 0 means that nothing should be done with the neighbor
107 * >0 means to shift entirely or partly the specified number of items to the neighbor
109 static int balance_leaf_when_delete(struct tree_balance
*tb
, int flag
)
111 struct buffer_head
*tbS0
= PATH_PLAST_BUFFER(tb
->tb_path
);
112 int item_pos
= PATH_LAST_POSITION(tb
->tb_path
);
113 int pos_in_item
= tb
->tb_path
->pos_in_item
;
114 struct buffer_info bi
;
116 struct item_head
*ih
;
118 RFALSE(tb
->FR
[0] && B_LEVEL(tb
->FR
[0]) != DISK_LEAF_NODE_LEVEL
+ 1,
119 "vs- 12000: level: wrong FR %z", tb
->FR
[0]);
120 RFALSE(tb
->blknum
[0] > 1,
121 "PAP-12005: tb->blknum == %d, can not be > 1", tb
->blknum
[0]);
122 RFALSE(!tb
->blknum
[0] && !PATH_H_PPARENT(tb
->tb_path
, 0),
123 "PAP-12010: tree can not be empty");
125 ih
= B_N_PITEM_HEAD(tbS0
, item_pos
);
126 buffer_info_init_tbS0(tb
, &bi
);
128 /* Delete or truncate the item */
131 case M_DELETE
: /* delete item in S[0] */
133 RFALSE(ih_item_len(ih
) + IH_SIZE
!= -tb
->insert_size
[0],
134 "vs-12013: mode Delete, insert size %d, ih to be deleted %h",
135 -tb
->insert_size
[0], ih
);
137 leaf_delete_items(&bi
, 0, item_pos
, 1, -1);
139 if (!item_pos
&& tb
->CFL
[0]) {
140 if (B_NR_ITEMS(tbS0
)) {
141 replace_key(tb
, tb
->CFL
[0], tb
->lkey
[0], tbS0
,
144 if (!PATH_H_POSITION(tb
->tb_path
, 1))
145 replace_key(tb
, tb
->CFL
[0], tb
->lkey
[0],
146 PATH_H_PPARENT(tb
->tb_path
,
151 RFALSE(!item_pos
&& !tb
->CFL
[0],
152 "PAP-12020: tb->CFL[0]==%p, tb->L[0]==%p", tb
->CFL
[0],
157 case M_CUT
:{ /* cut item in S[0] */
158 if (is_direntry_le_ih(ih
)) {
160 /* UFS unlink semantics are such that you can only delete one directory entry at a time. */
161 /* when we cut a directory tb->insert_size[0] means number of entries to be cut (always 1) */
162 tb
->insert_size
[0] = -1;
163 leaf_cut_from_buffer(&bi
, item_pos
, pos_in_item
,
164 -tb
->insert_size
[0]);
166 RFALSE(!item_pos
&& !pos_in_item
&& !tb
->CFL
[0],
167 "PAP-12030: can not change delimiting key. CFL[0]=%p",
170 if (!item_pos
&& !pos_in_item
&& tb
->CFL
[0]) {
171 replace_key(tb
, tb
->CFL
[0], tb
->lkey
[0],
175 leaf_cut_from_buffer(&bi
, item_pos
, pos_in_item
,
176 -tb
->insert_size
[0]);
178 RFALSE(!ih_item_len(ih
),
179 "PAP-12035: cut must leave non-zero dynamic length of item");
185 print_cur_tb("12040");
186 reiserfs_panic(tb
->tb_sb
, "PAP-12040",
187 "unexpected mode: %s(%d)",
189 M_PASTE
) ? "PASTE" : ((flag
==
190 M_INSERT
) ? "INSERT" :
194 /* the rule is that no shifting occurs unless by shifting a node can be freed */
195 n
= B_NR_ITEMS(tbS0
);
196 if (tb
->lnum
[0]) { /* L[0] takes part in balancing */
197 if (tb
->lnum
[0] == -1) { /* L[0] must be joined with S[0] */
198 if (tb
->rnum
[0] == -1) { /* R[0] must be also joined with S[0] */
199 if (tb
->FR
[0] == PATH_H_PPARENT(tb
->tb_path
, 0)) {
200 /* all contents of all the 3 buffers will be in L[0] */
201 if (PATH_H_POSITION(tb
->tb_path
, 1) == 0
202 && 1 < B_NR_ITEMS(tb
->FR
[0]))
203 replace_key(tb
, tb
->CFL
[0],
207 leaf_move_items(LEAF_FROM_S_TO_L
, tb
, n
,
209 leaf_move_items(LEAF_FROM_R_TO_L
, tb
,
210 B_NR_ITEMS(tb
->R
[0]),
213 reiserfs_invalidate_buffer(tb
, tbS0
);
214 reiserfs_invalidate_buffer(tb
,
219 /* all contents of all the 3 buffers will be in R[0] */
220 leaf_move_items(LEAF_FROM_S_TO_R
, tb
, n
, -1,
222 leaf_move_items(LEAF_FROM_L_TO_R
, tb
,
223 B_NR_ITEMS(tb
->L
[0]), -1, NULL
);
225 /* right_delimiting_key is correct in R[0] */
226 replace_key(tb
, tb
->CFR
[0], tb
->rkey
[0],
229 reiserfs_invalidate_buffer(tb
, tbS0
);
230 reiserfs_invalidate_buffer(tb
, tb
->L
[0]);
235 RFALSE(tb
->rnum
[0] != 0,
236 "PAP-12045: rnum must be 0 (%d)", tb
->rnum
[0]);
237 /* all contents of L[0] and S[0] will be in L[0] */
238 leaf_shift_left(tb
, n
, -1);
240 reiserfs_invalidate_buffer(tb
, tbS0
);
244 /* a part of contents of S[0] will be in L[0] and the rest part of S[0] will be in R[0] */
246 RFALSE((tb
->lnum
[0] + tb
->rnum
[0] < n
) ||
247 (tb
->lnum
[0] + tb
->rnum
[0] > n
+ 1),
248 "PAP-12050: rnum(%d) and lnum(%d) and item number(%d) in S[0] are not consistent",
249 tb
->rnum
[0], tb
->lnum
[0], n
);
250 RFALSE((tb
->lnum
[0] + tb
->rnum
[0] == n
) &&
251 (tb
->lbytes
!= -1 || tb
->rbytes
!= -1),
252 "PAP-12055: bad rbytes (%d)/lbytes (%d) parameters when items are not split",
253 tb
->rbytes
, tb
->lbytes
);
254 RFALSE((tb
->lnum
[0] + tb
->rnum
[0] == n
+ 1) &&
255 (tb
->lbytes
< 1 || tb
->rbytes
!= -1),
256 "PAP-12060: bad rbytes (%d)/lbytes (%d) parameters when items are split",
257 tb
->rbytes
, tb
->lbytes
);
259 leaf_shift_left(tb
, tb
->lnum
[0], tb
->lbytes
);
260 leaf_shift_right(tb
, tb
->rnum
[0], tb
->rbytes
);
262 reiserfs_invalidate_buffer(tb
, tbS0
);
267 if (tb
->rnum
[0] == -1) {
268 /* all contents of R[0] and S[0] will be in R[0] */
269 leaf_shift_right(tb
, n
, -1);
270 reiserfs_invalidate_buffer(tb
, tbS0
);
275 "PAP-12065: bad rnum parameter must be 0 (%d)", tb
->rnum
[0]);
279 static int balance_leaf(struct tree_balance
*tb
, struct item_head
*ih
, /* item header of inserted item (this is on little endian) */
280 const char *body
, /* body of inserted item or bytes to paste */
281 int flag
, /* i - insert, d - delete, c - cut, p - paste
282 (see comment to do_balance) */
283 struct item_head
*insert_key
, /* in our processing of one level we sometimes determine what
284 must be inserted into the next higher level. This insertion
285 consists of a key or two keys and their corresponding
287 struct buffer_head
**insert_ptr
/* inserted node-ptrs for the next level */
290 struct buffer_head
*tbS0
= PATH_PLAST_BUFFER(tb
->tb_path
);
291 int item_pos
= PATH_LAST_POSITION(tb
->tb_path
); /* index into the array of item headers in S[0]
292 of the affected item */
293 struct buffer_info bi
;
294 struct buffer_head
*S_new
[2]; /* new nodes allocated to hold what could not fit into S */
295 int snum
[2]; /* number of items that will be placed
296 into S_new (includes partially shifted
298 int sbytes
[2]; /* if an item is partially shifted into S_new then
299 if it is a directory item
300 it is the number of entries from the item that are shifted into S_new
302 it is the number of bytes from the item that are shifted into S_new
309 PROC_INFO_INC(tb
->tb_sb
, balance_at
[0]);
311 /* Make balance in case insert_size[0] < 0 */
312 if (tb
->insert_size
[0] < 0)
313 return balance_leaf_when_delete(tb
, flag
);
316 if (flag
== M_INSERT
&& !body
)
317 zeros_num
= ih_item_len(ih
);
319 pos_in_item
= tb
->tb_path
->pos_in_item
;
320 /* for indirect item pos_in_item is measured in unformatted node
321 pointers. Recalculate to bytes */
323 && is_indirect_le_ih(B_N_PITEM_HEAD(tbS0
, item_pos
)))
324 pos_in_item
*= UNFM_P_SIZE
;
326 if (tb
->lnum
[0] > 0) {
327 /* Shift lnum[0] items from S[0] to the left neighbor L[0] */
328 if (item_pos
< tb
->lnum
[0]) {
329 /* new item or it part falls to L[0], shift it too */
330 n
= B_NR_ITEMS(tb
->L
[0]);
333 case M_INSERT
: /* insert item into L[0] */
335 if (item_pos
== tb
->lnum
[0] - 1
336 && tb
->lbytes
!= -1) {
337 /* part of new item falls into L[0] */
342 leaf_shift_left(tb
, tb
->lnum
[0] - 1,
345 /* Calculate item length to insert to S[0] */
347 ih_item_len(ih
) - tb
->lbytes
;
348 /* Calculate and check item length to insert to L[0] */
353 RFALSE(ih_item_len(ih
) <= 0,
354 "PAP-12080: there is nothing to insert into L[0]: ih_item_len=%d",
357 /* Insert new item into L[0] */
358 buffer_info_init_left(tb
, &bi
);
359 leaf_insert_into_buf(&bi
,
367 version
= ih_version(ih
);
369 /* Calculate key component, item length and body to insert into S[0] */
370 set_le_ih_k_offset(ih
,
380 put_ih_item_len(ih
, new_item_len
);
381 if (tb
->lbytes
> zeros_num
) {
383 (tb
->lbytes
- zeros_num
);
386 zeros_num
-= tb
->lbytes
;
388 RFALSE(ih_item_len(ih
) <= 0,
389 "PAP-12085: there is nothing to insert into S[0]: ih_item_len=%d",
392 /* new item in whole falls into L[0] */
393 /* Shift lnum[0]-1 items to L[0] */
395 leaf_shift_left(tb
, tb
->lnum
[0] - 1,
397 /* Insert new item into L[0] */
398 buffer_info_init_left(tb
, &bi
);
399 leaf_insert_into_buf(&bi
,
403 tb
->insert_size
[0] = 0;
408 case M_PASTE
: /* append item in L[0] */
410 if (item_pos
== tb
->lnum
[0] - 1
411 && tb
->lbytes
!= -1) {
412 /* we must shift the part of the appended item */
413 if (is_direntry_le_ih
414 (B_N_PITEM_HEAD(tbS0
, item_pos
))) {
417 "PAP-12090: invalid parameter in case of a directory");
419 if (tb
->lbytes
> pos_in_item
) {
420 /* new directory entry falls into L[0] */
426 /* Shift lnum[0] - 1 items in whole. Shift lbytes - 1 entries from given directory item */
453 /* Append given directory entry to directory item */
454 buffer_info_init_left(tb
, &bi
);
463 /* previous string prepared space for pasting new entry, following string pastes this entry */
465 /* when we have merge directory item, pos_in_item has been changed too */
467 /* paste new directory entry. 1 is entry number */
468 leaf_paste_entries(&bi
,
486 tb
->insert_size
[0] = 0;
488 /* new directory item doesn't fall into L[0] */
489 /* Shift lnum[0]-1 items in whole. Shift lbytes directory entries from directory item number lnum[0] */
496 /* Calculate new position to append in item body */
497 pos_in_item
-= tb
->lbytes
;
500 RFALSE(tb
->lbytes
<= 0,
501 "PAP-12095: there is nothing to shift to L[0]. lbytes=%d",
503 RFALSE(pos_in_item
!=
507 "PAP-12100: incorrect position to paste: item_len=%d, pos_in_item=%d",
513 if (tb
->lbytes
>= pos_in_item
) {
514 /* appended item will be in L[0] in whole */
517 /* this bytes number must be appended to the last item of L[h] */
522 /* Calculate new insert_size[0] */
523 tb
->insert_size
[0] -=
529 "PAP-12105: there is nothing to paste into L[0]. insert_size=%d",
541 /* Append to body of item in L[0] */
542 buffer_info_init_left(tb
, &bi
);
556 /* 0-th item in S0 can be only of DIRECT type when l_n != 0 */
567 "PAP-12106: item length must be 0");
578 "PAP-12107: items must be of the same file");
579 if (is_indirect_le_ih(B_N_PITEM_HEAD(tb
->L
[0], n
+ item_pos
- ret_val
))) {
589 /* update key of first item in S0 */
604 /* update left delimiting key */
622 /* Calculate new body, position in item and insert_size[0] */
623 if (l_n
> zeros_num
) {
642 !op_is_left_mergeable
646 !op_is_left_mergeable
651 "PAP-12120: item must be merge-able with left neighboring item");
652 } else { /* only part of the appended item will be in L[0] */
654 /* Calculate position in item for append in S[0] */
658 RFALSE(pos_in_item
<= 0,
659 "PAP-12125: no place for paste. pos_in_item=%d",
662 /* Shift lnum[0] - 1 items in whole. Shift lbytes - 1 byte from item number lnum[0] */
670 } else { /* appended item will be in L[0] in whole */
672 struct item_head
*pasted
;
674 if (!item_pos
&& op_is_left_mergeable(B_N_PKEY(tbS0
, 0), tbS0
->b_size
)) { /* if we paste into first item of S[0] and it is left mergable */
675 /* then increment pos_in_item by the size of the last item in L[0] */
677 B_N_PITEM_HEAD(tb
->L
[0],
679 if (is_direntry_le_ih(pasted
))
688 /* Shift lnum[0] - 1 items in whole. Shift lbytes - 1 byte from item number lnum[0] */
690 leaf_shift_left(tb
, tb
->lnum
[0],
692 /* Append to body of item in L[0] */
693 buffer_info_init_left(tb
, &bi
);
694 leaf_paste_in_buffer(&bi
,
701 /* if appended item is directory, paste entry */
703 B_N_PITEM_HEAD(tb
->L
[0],
706 if (is_direntry_le_ih(pasted
))
707 leaf_paste_entries(&bi
,
722 /* if appended item is indirect item, put unformatted node into un list */
723 if (is_indirect_le_ih(pasted
))
724 set_ih_free_space(pasted
, 0);
725 tb
->insert_size
[0] = 0;
729 default: /* cases d and t */
730 reiserfs_panic(tb
->tb_sb
, "PAP-12130",
731 "lnum > 0: unexpected mode: "
734 M_DELETE
) ? "DELETE" : ((flag
==
742 /* new item doesn't fall into L[0] */
743 leaf_shift_left(tb
, tb
->lnum
[0], tb
->lbytes
);
747 /* tb->lnum[0] > 0 */
748 /* Calculate new item position */
749 item_pos
-= (tb
->lnum
[0] - ((tb
->lbytes
!= -1) ? 1 : 0));
751 if (tb
->rnum
[0] > 0) {
752 /* shift rnum[0] items from S[0] to the right neighbor R[0] */
753 n
= B_NR_ITEMS(tbS0
);
756 case M_INSERT
: /* insert item */
757 if (n
- tb
->rnum
[0] < item_pos
) { /* new item or its part falls to R[0] */
758 if (item_pos
== n
- tb
->rnum
[0] + 1 && tb
->rbytes
!= -1) { /* part of new item falls into R[0] */
759 loff_t old_key_comp
, old_len
,
765 leaf_shift_right(tb
, tb
->rnum
[0] - 1,
768 version
= ih_version(ih
);
769 /* Remember key component and item length */
770 old_key_comp
= le_ih_k_offset(ih
);
771 old_len
= ih_item_len(ih
);
773 /* Calculate key component and item length to insert into R[0] */
778 rbytes
) << (is_indirect_le_ih(ih
)
782 set_le_ih_k_offset(ih
, offset
);
783 put_ih_item_len(ih
, tb
->rbytes
);
784 /* Insert part of the item into R[0] */
785 buffer_info_init_right(tb
, &bi
);
786 if ((old_len
- tb
->rbytes
) > zeros_num
) {
795 zeros_num
- (old_len
-
797 zeros_num
-= r_zeros_number
;
800 leaf_insert_into_buf(&bi
, 0, ih
, r_body
,
803 /* Replace right delimiting key by first key in R[0] */
804 replace_key(tb
, tb
->CFR
[0], tb
->rkey
[0],
807 /* Calculate key component and item length to insert into S[0] */
808 set_le_ih_k_offset(ih
, old_key_comp
);
810 old_len
- tb
->rbytes
);
812 tb
->insert_size
[0] -= tb
->rbytes
;
814 } else { /* whole new item falls into R[0] */
816 /* Shift rnum[0]-1 items to R[0] */
821 /* Insert new item into R[0] */
822 buffer_info_init_right(tb
, &bi
);
823 leaf_insert_into_buf(&bi
,
829 if (item_pos
- n
+ tb
->rnum
[0] - 1 == 0) {
830 replace_key(tb
, tb
->CFR
[0],
835 zeros_num
= tb
->insert_size
[0] = 0;
837 } else { /* new item or part of it doesn't fall into R[0] */
839 leaf_shift_right(tb
, tb
->rnum
[0], tb
->rbytes
);
843 case M_PASTE
: /* append item */
845 if (n
- tb
->rnum
[0] <= item_pos
) { /* pasted item or part of it falls to R[0] */
846 if (item_pos
== n
- tb
->rnum
[0] && tb
->rbytes
!= -1) { /* we must shift the part of the appended item */
847 if (is_direntry_le_ih(B_N_PITEM_HEAD(tbS0
, item_pos
))) { /* we append to directory item */
851 "PAP-12145: invalid parameter in case of a directory");
853 I_ENTRY_COUNT(B_N_PITEM_HEAD
856 if (entry_count
- tb
->rbytes
<
858 /* new directory entry falls into R[0] */
860 int paste_entry_position
;
862 RFALSE(tb
->rbytes
- 1 >=
866 "PAP-12150: no enough of entries to shift to R[0]: rbytes=%d, entry_count=%d",
869 /* Shift rnum[0]-1 items in whole. Shift rbytes-1 directory entries from directory item number rnum[0] */
877 /* Paste given directory entry to directory item */
878 paste_entry_position
=
882 buffer_info_init_right(tb
, &bi
);
885 paste_entry_position
,
889 leaf_paste_entries(&bi
,
891 paste_entry_position
,
905 if (paste_entry_position
907 /* change delimiting keys */
921 tb
->insert_size
[0] = 0;
923 } else { /* new directory entry doesn't fall into R[0] */
932 } else { /* regular object */
938 /* Calculate number of bytes which must be shifted from appended item */
941 tb
->insert_size
[0]) < 0)
944 RFALSE(pos_in_item
!=
948 "PAP-12155: invalid position to paste. ih_item_len=%d, pos_in_item=%d",
957 /* Calculate number of bytes which must remain in body after appending to R[0] */
965 unsigned long temp_rem
=
972 if (is_indirect_le_key
1007 /* k_offset (B_N_PKEY(tb->R[0],0)) += n_rem;
1008 k_offset (B_N_PDELIM_KEY(tb->CFR[0],tb->rkey[0])) += n_rem;*/
1009 do_balance_mark_internal_dirty
1010 (tb
, tb
->CFR
[0], 0);
1012 /* Append part of body into R[0] */
1013 buffer_info_init_right(tb
, &bi
);
1014 if (n_rem
> zeros_num
) {
1027 leaf_paste_in_buffer(&bi
, 0,
1036 if (is_indirect_le_ih
1041 "PAP-12160: paste more than one unformatted node pointer");
1047 tb
->insert_size
[0] = n_rem
;
1051 } else { /* pasted item in whole falls into R[0] */
1053 struct item_head
*pasted
;
1056 leaf_shift_right(tb
, tb
->rnum
[0],
1058 /* append item in R[0] */
1059 if (pos_in_item
>= 0) {
1060 buffer_info_init_right(tb
, &bi
);
1061 leaf_paste_in_buffer(&bi
,
1073 /* paste new entry, if item is directory item */
1075 B_N_PITEM_HEAD(tb
->R
[0],
1078 if (is_direntry_le_ih(pasted
)
1079 && pos_in_item
>= 0) {
1080 leaf_paste_entries(&bi
,
1097 RFALSE(item_pos
- n
+
1099 "PAP-12165: directory item must be first item of node when pasting is in 0th position");
1101 /* update delimiting keys */
1110 if (is_indirect_le_ih(pasted
))
1111 set_ih_free_space(pasted
, 0);
1112 zeros_num
= tb
->insert_size
[0] = 0;
1114 } else { /* new item doesn't fall into R[0] */
1116 leaf_shift_right(tb
, tb
->rnum
[0], tb
->rbytes
);
1119 default: /* cases d and t */
1120 reiserfs_panic(tb
->tb_sb
, "PAP-12175",
1121 "rnum > 0: unexpected mode: %s(%d)",
1123 M_DELETE
) ? "DELETE" : ((flag
==
1131 /* tb->rnum[0] > 0 */
1132 RFALSE(tb
->blknum
[0] > 3,
1133 "PAP-12180: blknum can not be %d. It must be <= 3",
1135 RFALSE(tb
->blknum
[0] < 0,
1136 "PAP-12185: blknum can not be %d. It must be >= 0",
1139 /* if while adding to a node we discover that it is possible to split
1140 it in two, and merge the left part into the left neighbor and the
1141 right part into the right neighbor, eliminating the node */
1142 if (tb
->blknum
[0] == 0) { /* node S[0] is empty now */
1144 RFALSE(!tb
->lnum
[0] || !tb
->rnum
[0],
1145 "PAP-12190: lnum and rnum must not be zero");
1146 /* if insertion was done before 0-th position in R[0], right
1147 delimiting key of the tb->L[0]'s and left delimiting key are
1148 not set correctly */
1151 reiserfs_panic(tb
->tb_sb
, "vs-12195",
1152 "CFR not initialized");
1153 copy_key(B_N_PDELIM_KEY(tb
->CFL
[0], tb
->lkey
[0]),
1154 B_N_PDELIM_KEY(tb
->CFR
[0], tb
->rkey
[0]));
1155 do_balance_mark_internal_dirty(tb
, tb
->CFL
[0], 0);
1158 reiserfs_invalidate_buffer(tb
, tbS0
);
1162 /* Fill new nodes that appear in place of S[0] */
1164 /* I am told that this copying is because we need an array to enable
1165 the looping code. -Hans */
1166 snum
[0] = tb
->s1num
, snum
[1] = tb
->s2num
;
1167 sbytes
[0] = tb
->s1bytes
;
1168 sbytes
[1] = tb
->s2bytes
;
1169 for (i
= tb
->blknum
[0] - 2; i
>= 0; i
--) {
1171 RFALSE(!snum
[i
], "PAP-12200: snum[%d] == %d. Must be > 0", i
,
1174 /* here we shift from S to S_new nodes */
1176 S_new
[i
] = get_FEB(tb
);
1178 /* initialized block type and tree level */
1179 set_blkh_level(B_BLK_HEAD(S_new
[i
]), DISK_LEAF_NODE_LEVEL
);
1181 n
= B_NR_ITEMS(tbS0
);
1184 case M_INSERT
: /* insert item */
1186 if (n
- snum
[i
] < item_pos
) { /* new item or it's part falls to first new node S_new[i] */
1187 if (item_pos
== n
- snum
[i
] + 1 && sbytes
[i
] != -1) { /* part of new item falls into S_new[i] */
1188 int old_key_comp
, old_len
,
1193 /* Move snum[i]-1 items from S[0] to S_new[i] */
1194 leaf_move_items(LEAF_FROM_S_TO_SNEW
, tb
,
1197 /* Remember key component and item length */
1198 version
= ih_version(ih
);
1199 old_key_comp
= le_ih_k_offset(ih
);
1200 old_len
= ih_item_len(ih
);
1202 /* Calculate key component and item length to insert into S_new[i] */
1203 set_le_ih_k_offset(ih
,
1204 le_ih_k_offset(ih
) +
1213 put_ih_item_len(ih
, sbytes
[i
]);
1215 /* Insert part of the item into S_new[i] before 0-th item */
1216 buffer_info_init_bh(tb
, &bi
, S_new
[i
]);
1218 if ((old_len
- sbytes
[i
]) > zeros_num
) {
1227 zeros_num
- (old_len
-
1229 zeros_num
-= r_zeros_number
;
1232 leaf_insert_into_buf(&bi
, 0, ih
, r_body
,
1235 /* Calculate key component and item length to insert into S[i] */
1236 set_le_ih_k_offset(ih
, old_key_comp
);
1238 old_len
- sbytes
[i
]);
1239 tb
->insert_size
[0] -= sbytes
[i
];
1240 } else { /* whole new item falls into S_new[i] */
1242 /* Shift snum[0] - 1 items to S_new[i] (sbytes[i] of split item) */
1243 leaf_move_items(LEAF_FROM_S_TO_SNEW
, tb
,
1244 snum
[i
] - 1, sbytes
[i
],
1247 /* Insert new item into S_new[i] */
1248 buffer_info_init_bh(tb
, &bi
, S_new
[i
]);
1249 leaf_insert_into_buf(&bi
,
1254 zeros_num
= tb
->insert_size
[0] = 0;
1258 else { /* new item or it part don't falls into S_new[i] */
1260 leaf_move_items(LEAF_FROM_S_TO_SNEW
, tb
,
1261 snum
[i
], sbytes
[i
], S_new
[i
]);
1265 case M_PASTE
: /* append item */
1267 if (n
- snum
[i
] <= item_pos
) { /* pasted item or part if it falls to S_new[i] */
1268 if (item_pos
== n
- snum
[i
] && sbytes
[i
] != -1) { /* we must shift part of the appended item */
1269 struct item_head
*aux_ih
;
1271 RFALSE(ih
, "PAP-12210: ih must be 0");
1273 if (is_direntry_le_ih
1275 B_N_PITEM_HEAD(tbS0
, item_pos
))) {
1276 /* we append to directory item */
1281 ih_entry_count(aux_ih
);
1283 if (entry_count
- sbytes
[i
] <
1287 /* new directory entry falls into S_new[i] */
1291 "PAP-12215: insert_size is already 0");
1292 RFALSE(sbytes
[i
] - 1 >=
1294 "PAP-12220: there are no so much entries (%d), only %d",
1298 /* Shift snum[i]-1 items in whole. Shift sbytes[i] directory entries from directory item number snum[i] */
1300 (LEAF_FROM_S_TO_SNEW
,
1304 /* Paste given directory entry to directory item */
1305 buffer_info_init_bh(tb
, &bi
, S_new
[i
]);
1306 leaf_paste_in_buffer
1313 /* paste new directory entry */
1314 leaf_paste_entries(&bi
,
1334 tb
->insert_size
[0] = 0;
1336 } else { /* new directory entry doesn't fall into S_new[i] */
1338 (LEAF_FROM_S_TO_SNEW
,
1343 } else { /* regular object */
1349 RFALSE(pos_in_item
!=
1353 || tb
->insert_size
[0] <=
1355 "PAP-12225: item too short or insert_size <= 0");
1357 /* Calculate number of bytes which must be shifted from appended item */
1364 (LEAF_FROM_S_TO_SNEW
, tb
,
1368 /* Calculate number of bytes which must remain in body after append to S_new[i] */
1370 tb
->insert_size
[0] -
1374 /* Append part of body into S_new[0] */
1375 buffer_info_init_bh(tb
, &bi
, S_new
[i
]);
1376 if (n_rem
> zeros_num
) {
1389 leaf_paste_in_buffer(&bi
, 0,
1398 struct item_head
*tmp
;
1401 B_N_PITEM_HEAD(S_new
1404 if (is_indirect_le_ih
1427 tb
->insert_size
[0] = n_rem
;
1432 /* item falls wholly into S_new[i] */
1435 struct item_head
*pasted
;
1437 #ifdef CONFIG_REISERFS_CHECK
1438 struct item_head
*ih_check
=
1439 B_N_PITEM_HEAD(tbS0
, item_pos
);
1441 if (!is_direntry_le_ih(ih_check
)
1442 && (pos_in_item
!= ih_item_len(ih_check
)
1443 || tb
->insert_size
[0] <= 0))
1444 reiserfs_panic(tb
->tb_sb
,
1449 #endif /* CONFIG_REISERFS_CHECK */
1452 leaf_move_items(LEAF_FROM_S_TO_SNEW
,
1458 "PAP-12240: unexpected value returned by leaf_move_items (%d)",
1461 /* paste into item */
1462 buffer_info_init_bh(tb
, &bi
, S_new
[i
]);
1463 leaf_paste_in_buffer(&bi
,
1471 B_N_PITEM_HEAD(S_new
[i
],
1474 if (is_direntry_le_ih(pasted
)) {
1475 leaf_paste_entries(&bi
,
1491 /* if we paste to indirect item update ih_free_space */
1492 if (is_indirect_le_ih(pasted
))
1493 set_ih_free_space(pasted
, 0);
1494 zeros_num
= tb
->insert_size
[0] = 0;
1498 else { /* pasted item doesn't fall into S_new[i] */
1500 leaf_move_items(LEAF_FROM_S_TO_SNEW
, tb
,
1501 snum
[i
], sbytes
[i
], S_new
[i
]);
1504 default: /* cases d and t */
1505 reiserfs_panic(tb
->tb_sb
, "PAP-12245",
1506 "blknum > 2: unexpected mode: %s(%d)",
1508 M_DELETE
) ? "DELETE" : ((flag
==
1514 memcpy(insert_key
+ i
, B_N_PKEY(S_new
[i
], 0), KEY_SIZE
);
1515 insert_ptr
[i
] = S_new
[i
];
1517 RFALSE(!buffer_journaled(S_new
[i
])
1518 || buffer_journal_dirty(S_new
[i
])
1519 || buffer_dirty(S_new
[i
]), "PAP-12247: S_new[%d] : (%b)",
1523 /* if the affected item was not wholly shifted then we perform all necessary operations on that part or whole of the
1524 affected item which remains in S */
1525 if (0 <= item_pos
&& item_pos
< tb
->s0num
) { /* if we must insert or append into buffer S[0] */
1528 case M_INSERT
: /* insert item into S[0] */
1529 buffer_info_init_tbS0(tb
, &bi
);
1530 leaf_insert_into_buf(&bi
, item_pos
, ih
, body
,
1533 /* If we insert the first key change the delimiting key */
1534 if (item_pos
== 0) {
1535 if (tb
->CFL
[0]) /* can be 0 in reiserfsck */
1536 replace_key(tb
, tb
->CFL
[0], tb
->lkey
[0],
1542 case M_PASTE
:{ /* append item in S[0] */
1543 struct item_head
*pasted
;
1545 pasted
= B_N_PITEM_HEAD(tbS0
, item_pos
);
1546 /* when directory, may be new entry already pasted */
1547 if (is_direntry_le_ih(pasted
)) {
1548 if (pos_in_item
>= 0 &&
1550 ih_entry_count(pasted
)) {
1552 RFALSE(!tb
->insert_size
[0],
1553 "PAP-12260: insert_size is 0 already");
1556 buffer_info_init_tbS0(tb
, &bi
);
1557 leaf_paste_in_buffer(&bi
,
1566 leaf_paste_entries(&bi
,
1579 if (!item_pos
&& !pos_in_item
) {
1582 "PAP-12270: CFL[0]/L[0] must be specified");
1596 tb
->insert_size
[0] = 0;
1598 } else { /* regular object */
1599 if (pos_in_item
== ih_item_len(pasted
)) {
1601 RFALSE(tb
->insert_size
[0] <= 0,
1602 "PAP-12275: insert size must not be %d",
1603 tb
->insert_size
[0]);
1604 buffer_info_init_tbS0(tb
, &bi
);
1605 leaf_paste_in_buffer(&bi
,
1613 if (is_indirect_le_ih(pasted
)) {
1618 "PAP-12280: insert_size for indirect item must be %d, not %d",
1626 tb
->insert_size
[0] = 0;
1628 #ifdef CONFIG_REISERFS_CHECK
1630 if (tb
->insert_size
[0]) {
1631 print_cur_tb("12285");
1638 tb
->insert_size
[0]);
1641 #endif /* CONFIG_REISERFS_CHECK */
1644 } /* case M_PASTE: */
1647 #ifdef CONFIG_REISERFS_CHECK
1648 if (flag
== M_PASTE
&& tb
->insert_size
[0]) {
1649 print_cur_tb("12290");
1650 reiserfs_panic(tb
->tb_sb
,
1651 "PAP-12290", "insert_size is still not 0 (%d)",
1652 tb
->insert_size
[0]);
1654 #endif /* CONFIG_REISERFS_CHECK */
1656 } /* Leaf level of the tree is balanced (end of balance_leaf) */
1658 /* Make empty node */
1659 void make_empty_node(struct buffer_info
*bi
)
1661 struct block_head
*blkh
;
1663 RFALSE(bi
->bi_bh
== NULL
, "PAP-12295: pointer to the buffer is NULL");
1665 blkh
= B_BLK_HEAD(bi
->bi_bh
);
1666 set_blkh_nr_item(blkh
, 0);
1667 set_blkh_free_space(blkh
, MAX_CHILD_SIZE(bi
->bi_bh
));
1670 B_N_CHILD(bi
->bi_parent
, bi
->bi_position
)->dc_size
= 0; /* Endian safe if 0 */
1673 /* Get first empty buffer */
1674 struct buffer_head
*get_FEB(struct tree_balance
*tb
)
1677 struct buffer_info bi
;
1679 for (i
= 0; i
< MAX_FEB_SIZE
; i
++)
1680 if (tb
->FEB
[i
] != NULL
)
1683 if (i
== MAX_FEB_SIZE
)
1684 reiserfs_panic(tb
->tb_sb
, "vs-12300", "FEB list is empty");
1686 buffer_info_init_bh(tb
, &bi
, tb
->FEB
[i
]);
1687 make_empty_node(&bi
);
1688 set_buffer_uptodate(tb
->FEB
[i
]);
1689 tb
->used
[i
] = tb
->FEB
[i
];
1695 /* This is now used because reiserfs_free_block has to be able to
1698 static void store_thrown(struct tree_balance
*tb
, struct buffer_head
*bh
)
1702 if (buffer_dirty(bh
))
1703 reiserfs_warning(tb
->tb_sb
, "reiserfs-12320",
1704 "called with dirty buffer");
1705 for (i
= 0; i
< ARRAY_SIZE(tb
->thrown
); i
++)
1706 if (!tb
->thrown
[i
]) {
1708 get_bh(bh
); /* free_thrown puts this */
1711 reiserfs_warning(tb
->tb_sb
, "reiserfs-12321",
1712 "too many thrown buffers");
1715 static void free_thrown(struct tree_balance
*tb
)
1718 b_blocknr_t blocknr
;
1719 for (i
= 0; i
< ARRAY_SIZE(tb
->thrown
); i
++) {
1720 if (tb
->thrown
[i
]) {
1721 blocknr
= tb
->thrown
[i
]->b_blocknr
;
1722 if (buffer_dirty(tb
->thrown
[i
]))
1723 reiserfs_warning(tb
->tb_sb
, "reiserfs-12322",
1724 "called with dirty buffer %d",
1726 brelse(tb
->thrown
[i
]); /* incremented in store_thrown */
1727 reiserfs_free_block(tb
->transaction_handle
, NULL
,
1733 void reiserfs_invalidate_buffer(struct tree_balance
*tb
, struct buffer_head
*bh
)
1735 struct block_head
*blkh
;
1736 blkh
= B_BLK_HEAD(bh
);
1737 set_blkh_level(blkh
, FREE_LEVEL
);
1738 set_blkh_nr_item(blkh
, 0);
1740 clear_buffer_dirty(bh
);
1741 store_thrown(tb
, bh
);
1744 /* Replace n_dest'th key in buffer dest by n_src'th key of buffer src.*/
1745 void replace_key(struct tree_balance
*tb
, struct buffer_head
*dest
, int n_dest
,
1746 struct buffer_head
*src
, int n_src
)
1749 RFALSE(dest
== NULL
|| src
== NULL
,
1750 "vs-12305: source or destination buffer is 0 (src=%p, dest=%p)",
1752 RFALSE(!B_IS_KEYS_LEVEL(dest
),
1753 "vs-12310: invalid level (%z) for destination buffer. dest must be leaf",
1755 RFALSE(n_dest
< 0 || n_src
< 0,
1756 "vs-12315: src(%d) or dest(%d) key number < 0", n_src
, n_dest
);
1757 RFALSE(n_dest
>= B_NR_ITEMS(dest
) || n_src
>= B_NR_ITEMS(src
),
1758 "vs-12320: src(%d(%d)) or dest(%d(%d)) key number is too big",
1759 n_src
, B_NR_ITEMS(src
), n_dest
, B_NR_ITEMS(dest
));
1761 if (B_IS_ITEMS_LEVEL(src
))
1762 /* source buffer contains leaf node */
1763 memcpy(B_N_PDELIM_KEY(dest
, n_dest
), B_N_PITEM_HEAD(src
, n_src
),
1766 memcpy(B_N_PDELIM_KEY(dest
, n_dest
), B_N_PDELIM_KEY(src
, n_src
),
1769 do_balance_mark_internal_dirty(tb
, dest
, 0);
1772 int get_left_neighbor_position(struct tree_balance
*tb
, int h
)
1774 int Sh_position
= PATH_H_POSITION(tb
->tb_path
, h
+ 1);
1776 RFALSE(PATH_H_PPARENT(tb
->tb_path
, h
) == NULL
|| tb
->FL
[h
] == NULL
,
1777 "vs-12325: FL[%d](%p) or F[%d](%p) does not exist",
1778 h
, tb
->FL
[h
], h
, PATH_H_PPARENT(tb
->tb_path
, h
));
1780 if (Sh_position
== 0)
1781 return B_NR_ITEMS(tb
->FL
[h
]);
1783 return Sh_position
- 1;
1786 int get_right_neighbor_position(struct tree_balance
*tb
, int h
)
1788 int Sh_position
= PATH_H_POSITION(tb
->tb_path
, h
+ 1);
1790 RFALSE(PATH_H_PPARENT(tb
->tb_path
, h
) == NULL
|| tb
->FR
[h
] == NULL
,
1791 "vs-12330: F[%d](%p) or FR[%d](%p) does not exist",
1792 h
, PATH_H_PPARENT(tb
->tb_path
, h
), h
, tb
->FR
[h
]);
1794 if (Sh_position
== B_NR_ITEMS(PATH_H_PPARENT(tb
->tb_path
, h
)))
1797 return Sh_position
+ 1;
1800 #ifdef CONFIG_REISERFS_CHECK
1802 int is_reusable(struct super_block
*s
, b_blocknr_t block
, int bit_value
);
/* Debug check (CONFIG_REISERFS_CHECK): verify an internal node is
   dirty/journaled as required and that every child block number it
   holds is a usable block on this filesystem; panic otherwise.
   'mes' is the caller-supplied label printed on failure. */
static void check_internal_node(struct super_block *s, struct buffer_head *bh,
				char *mes)
{
	struct disk_child *dc;
	int i;

	RFALSE(!bh, "PAP-12336: bh == 0");

	/* a node that is not (or no longer) in the tree has nothing to check */
	if (!bh || !B_IS_IN_TREE(bh))
		return;

	RFALSE(!buffer_dirty(bh) &&
	       !(buffer_journaled(bh) || buffer_journal_dirty(bh)),
	       "PAP-12337: buffer (%b) must be dirty", bh);
	dc = B_N_CHILD(bh, 0);

	/* NB: an internal node has one more child pointer than it has
	   keys, hence '<=' rather than '<' */
	for (i = 0; i <= B_NR_ITEMS(bh); i++, dc++) {
		if (!is_reusable(s, dc_block_number(dc), 1)) {
			print_cur_tb(mes);
			reiserfs_panic(s, "PAP-12338",
				       "invalid child pointer %y in %b",
				       dc, bh);
		}
	}
}
1829 static int locked_or_not_in_tree(struct tree_balance
*tb
,
1830 struct buffer_head
*bh
, char *which
)
1832 if ((!buffer_journal_prepared(bh
) && buffer_locked(bh
)) ||
1833 !B_IS_IN_TREE(bh
)) {
1834 reiserfs_warning(tb
->tb_sb
, "vs-12339", "%s (%b)", which
, bh
);
/* Debug check (CONFIG_REISERFS_CHECK): run before balancing starts.
   Panics if cur_tb is already set (evidence that schedule() ran in the
   middle of another do_balance), then verifies that every buffer the
   balance will modify — left/right neighbors, their parents and common
   parents, and S[0] itself — is unlocked and in the tree.  Returns
   nonzero if any buffer fails locked_or_not_in_tree(). */
static int check_before_balancing(struct tree_balance *tb)
{
	int retval = 0;

	if (cur_tb) {
		reiserfs_panic(tb->tb_sb, "vs-12335", "suspect that schedule "
			       "occurred based on cur_tb not being null at "
			       "this point in code. do_balance cannot properly "
			       "handle schedule occurring while it runs.");
	}

	/* double check that buffers that we will modify are unlocked. (fix_nodes should already have
	   prepped all of these for us). */
	if (tb->lnum[0]) {
		/* items will be shifted left: L[0] and its parents matter */
		retval |= locked_or_not_in_tree(tb, tb->L[0], "L[0]");
		retval |= locked_or_not_in_tree(tb, tb->FL[0], "FL[0]");
		retval |= locked_or_not_in_tree(tb, tb->CFL[0], "CFL[0]");
		check_leaf(tb->L[0]);
	}
	if (tb->rnum[0]) {
		/* items will be shifted right: R[0] and its parents matter */
		retval |= locked_or_not_in_tree(tb, tb->R[0], "R[0]");
		retval |= locked_or_not_in_tree(tb, tb->FR[0], "FR[0]");
		retval |= locked_or_not_in_tree(tb, tb->CFR[0], "CFR[0]");
		check_leaf(tb->R[0]);
	}
	/* the node being balanced is always involved */
	retval |= locked_or_not_in_tree(tb, PATH_PLAST_BUFFER(tb->tb_path),
					"S[0]");
	check_leaf(PATH_PLAST_BUFFER(tb->tb_path));

	return retval;
}
/* Debug check (CONFIG_REISERFS_CHECK): run after the leaf level has
   been balanced.  For each node touched (L[0], R[0], S[0]) it verifies
   the invariant free_space == MAX_CHILD_SIZE - dc_size(child pointer
   in parent); any mismatch means a shift updated the node and its
   parent's disk_child inconsistently, and we panic. */
static void check_after_balance_leaf(struct tree_balance *tb)
{
	if (tb->lnum[0]) {
		/* something was shifted left: L[0]'s free space must agree
		   with the dc_size recorded in its parent FL[0] */
		if (B_FREE_SPACE(tb->L[0]) !=
		    MAX_CHILD_SIZE(tb->L[0]) -
		    dc_size(B_N_CHILD
			    (tb->FL[0], get_left_neighbor_position(tb, 0)))) {
			print_cur_tb("12221");
			reiserfs_panic(tb->tb_sb, "PAP-12355",
				       "shift to left was incorrect");
		}
	}
	if (tb->rnum[0]) {
		/* same invariant for the right neighbor R[0] / FR[0] */
		if (B_FREE_SPACE(tb->R[0]) !=
		    MAX_CHILD_SIZE(tb->R[0]) -
		    dc_size(B_N_CHILD
			    (tb->FR[0], get_right_neighbor_position(tb, 0)))) {
			print_cur_tb("12222");
			reiserfs_panic(tb->tb_sb, "PAP-12360",
				       "shift to right was incorrect");
		}
	}
	/* finally S[0] itself against its parent's child pointer */
	if (PATH_H_PBUFFER(tb->tb_path, 1) &&
	    (B_FREE_SPACE(PATH_H_PBUFFER(tb->tb_path, 0)) !=
	     (MAX_CHILD_SIZE(PATH_H_PBUFFER(tb->tb_path, 0)) -
	      dc_size(B_N_CHILD(PATH_H_PBUFFER(tb->tb_path, 1),
				PATH_H_POSITION(tb->tb_path, 1)))))) {
		int left = B_FREE_SPACE(PATH_H_PBUFFER(tb->tb_path, 0));
		int right = (MAX_CHILD_SIZE(PATH_H_PBUFFER(tb->tb_path, 0)) -
			     dc_size(B_N_CHILD(PATH_H_PBUFFER(tb->tb_path, 1),
					       PATH_H_POSITION(tb->tb_path,
							       1))));
		print_cur_tb("12223");
		reiserfs_warning(tb->tb_sb, "reiserfs-12363",
				 "B_FREE_SPACE (PATH_H_PBUFFER(tb->tb_path,0)) = %d; "
				 "MAX_CHILD_SIZE (%d) - dc_size( %y, %d ) [%d] = %d",
				 left,
				 MAX_CHILD_SIZE(PATH_H_PBUFFER(tb->tb_path, 0)),
				 PATH_H_PBUFFER(tb->tb_path, 1),
				 PATH_H_POSITION(tb->tb_path, 1),
				 dc_size(B_N_CHILD
					 (PATH_H_PBUFFER(tb->tb_path, 1),
					  PATH_H_POSITION(tb->tb_path, 1))),
				 right);
		reiserfs_panic(tb->tb_sb, "PAP-12365", "S is incorrect");
	}
}
1920 static void check_leaf_level(struct tree_balance
*tb
)
1922 check_leaf(tb
->L
[0]);
1923 check_leaf(tb
->R
[0]);
1924 check_leaf(PATH_PLAST_BUFFER(tb
->tb_path
));
/* Debug check (CONFIG_REISERFS_CHECK): walk every internal level that
   balancing touched (insert_size[h] != 0) and validate the node on the
   path plus whichever neighbors were involved at that level. */
static void check_internal_levels(struct tree_balance *tb)
{
	int h;

	/* check all internal nodes */
	for (h = 1; tb->insert_size[h]; h++) {
		check_internal_node(tb->tb_sb, PATH_H_PBUFFER(tb->tb_path, h),
				    "BAD BUFFER ON PATH");
		/* neighbors only matter if something was shifted into them */
		if (tb->lnum[h])
			check_internal_node(tb->tb_sb, tb->L[h], "BAD L");
		if (tb->rnum[h])
			check_internal_node(tb->tb_sb, tb->R[h], "BAD R");
	}
}
/* Now we have all of the buffers that must be used in balancing of
   the tree.  We rely on the assumption that schedule() will not occur
   while do_balance works. ( Only interrupt handlers are acceptable.)
   We balance the tree according to the analysis made before this,
   using buffers already obtained.  For SMP support it will someday be
   necessary to add ordered locking of tb. */

/* Some interesting rules of balancing:

   we delete a maximum of two nodes per level per balancing: we never
   delete R, when we delete two of three nodes L, S, R then we move
   them into R.

   we only delete L if we are deleting two nodes, if we delete only
   one node we delete S

   if we shift leaves then we shift as much as we can: this is a
   deliberate policy of extremism in node packing which results in
   higher average utilization after repeated random balance operations
   at the cost of more memory copies and more balancing as a result of
   small insertions to full nodes.

   if we shift internal nodes we try to evenly balance the node
   utilization, with consequent less balancing at the cost of lower
   utilization.

   one could argue that the policy for directories in leaves should be
   that of internal nodes, but we will wait until another day to
   evaluate this....  It would be nice to someday measure and prove
   these assumptions as to what is optimal....

*/
/* Entry hook of balancing: under CONFIG_REISERFS_CHECK, assert that no
   buffer we are about to modify is locked or out of the tree, and
   record tb in cur_tb so a concurrent/rescheduled do_balance can be
   detected by check_before_balancing(). */
static inline void do_balance_starts(struct tree_balance *tb)
{
	/* use print_cur_tb() to see initial state of struct
	   tree_balance */

	/* store_print_tb (tb); */

	/* do not delete, just comment it out */
/*    print_tb(flag, PATH_LAST_POSITION(tb->tb_path), tb->tb_path->pos_in_item, tb,
	     "check");*/
	RFALSE(check_before_balancing(tb), "PAP-12340: locked buffers in TB");
#ifdef CONFIG_REISERFS_CHECK
	cur_tb = tb;
#endif
}
/* Exit hook of balancing: run the post-balance debug checks, clear the
   cur_tb "balancing in progress" marker, bump the per-sb balance
   counter, release the nodes fixed for this balance and free the
   blocks queued on the thrown list. */
static inline void do_balance_completed(struct tree_balance *tb)
{

#ifdef CONFIG_REISERFS_CHECK
	check_leaf_level(tb);
	check_internal_levels(tb);
	cur_tb = NULL;
#endif

	/* reiserfs_free_block is no longer schedule safe.  So, we need to
	 ** put the buffers we want freed on the thrown list during do_balance,
	 ** and then free them now
	 */

	REISERFS_SB(tb->tb_sb)->s_do_balance++;

	/* release all nodes hold to perform the balancing */
	unfix_nodes(tb);

	free_thrown(tb);
}
/* Balance the tree as analyzed by fix_nodes(): first the leaf level,
   then each internal level that still has pending insert_size.  All
   buffers needed were obtained beforehand, so no schedule() can occur
   between here and completion (see the comment block above). */
void do_balance(struct tree_balance *tb,	/* tree_balance structure */
		struct item_head *ih,	/* item header of inserted item */
		const char *body,	/* body of inserted item or bytes to paste */
		int flag)
{				/* i - insert, d - delete
				   c - cut, p - paste

				   Cut means delete part of an item
				   (includes removing an entry from a
				   directory).

				   Delete means delete whole item.

				   Insert means add a new item into the
				   tree.

				   Paste means to append to the end of an
				   existing file or to insert a directory
				   entry.  */
	int child_pos,		/* position of a child node in its parent */
	    h;			/* level of the tree being processed */
	struct item_head insert_key[2];	/* in our processing of one level
					   we sometimes determine what
					   must be inserted into the next
					   higher level.  This insertion
					   consists of a key or two keys
					   and their corresponding
					   pointers */
	struct buffer_head *insert_ptr[2];	/* inserted node-ptrs for the next
						   level */

	tb->tb_mode = flag;
	tb->need_balance_dirty = 0;

	if (FILESYSTEM_CHANGED_TB(tb)) {
		reiserfs_panic(tb->tb_sb, "clm-6000", "fs generation has "
			       "changed");
	}
	/* if we have no real work to do  */
	if (!tb->insert_size[0]) {
		reiserfs_warning(tb->tb_sb, "PAP-12350",
				 "insert_size == 0, mode == %c", flag);
		unfix_nodes(tb);
		return;
	}

	/* bump the fs generation so racing tree lookups restart */
	atomic_inc(&(fs_generation(tb->tb_sb)));
	do_balance_starts(tb);

	/* balance leaf returns 0 except if combining L R and S into
	   one node. see balance_internal() for explanation of this
	   line of code. */
	child_pos = PATH_H_B_ITEM_ORDER(tb->tb_path, 0) +
	    balance_leaf(tb, ih, body, flag, insert_key, insert_ptr);

#ifdef CONFIG_REISERFS_CHECK
	check_after_balance_leaf(tb);
#endif

	/* Balance internal level of the tree. */
	for (h = 1; h < MAX_HEIGHT && tb->insert_size[h]; h++)
		balance_internal(tb, h, child_pos, insert_key, insert_ptr);

	do_balance_completed(tb);

}