/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_dir2.h"
#include "xfs_dir2_format.h"
#include "xfs_dir2_priv.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_attr.h"
#include "xfs_attr_leaf.h"
#include "xfs_error.h"
#include "xfs_trace.h"
/*
 * xfs_da_btree.c
 *
 * Routines to implement directories as Btrees of hashed names.
 */
/*========================================================================
 * Function prototypes for the kernel.
 *========================================================================*/

/*
 * Routines used for growing the Btree.
 */
STATIC int xfs_da_root_split(xfs_da_state_t *state,
					    xfs_da_state_blk_t *existing_root,
					    xfs_da_state_blk_t *new_child);
STATIC int xfs_da_node_split(xfs_da_state_t *state,
					    xfs_da_state_blk_t *existing_blk,
					    xfs_da_state_blk_t *split_blk,
					    xfs_da_state_blk_t *blk_to_add,
					    int treelevel,
					    int *result);
STATIC void xfs_da_node_rebalance(xfs_da_state_t *state,
					 xfs_da_state_blk_t *node_blk_1,
					 xfs_da_state_blk_t *node_blk_2);
STATIC void xfs_da_node_add(xfs_da_state_t *state,
				   xfs_da_state_blk_t *old_node_blk,
				   xfs_da_state_blk_t *new_node_blk);

/*
 * Routines used for shrinking the Btree.
 */
STATIC int xfs_da_root_join(xfs_da_state_t *state,
					   xfs_da_state_blk_t *root_blk);
STATIC int xfs_da_node_toosmall(xfs_da_state_t *state, int *retval);
STATIC void xfs_da_node_remove(xfs_da_state_t *state,
					      xfs_da_state_blk_t *drop_blk);
STATIC void xfs_da_node_unbalance(xfs_da_state_t *state,
					 xfs_da_state_blk_t *src_node_blk,
					 xfs_da_state_blk_t *dst_node_blk);

/*
 * Utility routines.
 */
STATIC uint	xfs_da_node_lasthash(xfs_dabuf_t *bp, int *count);
STATIC int	xfs_da_node_order(xfs_dabuf_t *node1_bp, xfs_dabuf_t *node2_bp);
STATIC xfs_dabuf_t *xfs_da_buf_make(int nbuf, xfs_buf_t **bps);
STATIC int	xfs_da_blk_unlink(xfs_da_state_t *state,
				  xfs_da_state_blk_t *drop_blk,
				  xfs_da_state_blk_t *save_blk);
STATIC void	xfs_da_state_kill_altpath(xfs_da_state_t *state);
/*========================================================================
 * Routines used for growing the Btree.
 *========================================================================*/

/*
 * Create the initial contents of an intermediate node.
 */
int
xfs_da_node_create(xfs_da_args_t *args, xfs_dablk_t blkno, int level,
				 xfs_dabuf_t **bpp, int whichfork)
{
	xfs_da_intnode_t *node;
	xfs_dabuf_t *bp;
	int error;
	xfs_trans_t *tp;

	trace_xfs_da_node_create(args);

	tp = args->trans;
	error = xfs_da_get_buf(tp, args->dp, blkno, -1, &bp, whichfork);
	if (error)
		return(error);
	ASSERT(bp != NULL);
	node = bp->data;
	node->hdr.info.forw = 0;
	node->hdr.info.back = 0;
	node->hdr.info.magic = cpu_to_be16(XFS_DA_NODE_MAGIC);
	node->hdr.info.pad = 0;
	node->hdr.count = 0;
	node->hdr.level = cpu_to_be16(level);

	xfs_da_log_buf(tp, bp,
		XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));

	*bpp = bp;
	return(0);
}
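
/*
 * Editorial note (not from the original source): XFS_DA_LOGRANGE(BASE,
 * ADDR, SIZE), defined in xfs_da_btree.h, expands to the pair of first/last
 * byte offsets of ADDR..ADDR+SIZE-1 within the block at BASE, which is the
 * (first, last) pair that xfs_da_log_buf() expects.  So the call above,
 *
 *	xfs_da_log_buf(tp, bp,
 *		XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));
 *
 * logs only offsets [0, sizeof(node->hdr) - 1] -- the node header at the
 * start of the block -- rather than dirtying the whole buffer.
 */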
/*
 * Split a leaf node, rebalance, then possibly split
 * intermediate nodes, rebalance, etc.
 */
int							/* error */
xfs_da_split(xfs_da_state_t *state)
{
	xfs_da_state_blk_t *oldblk, *newblk, *addblk;
	xfs_da_intnode_t *node;
	xfs_dabuf_t *bp;
	int max, action, error, i;

	trace_xfs_da_split(state->args);

	/*
	 * Walk back up the tree splitting/inserting/adjusting as necessary.
	 * If we need to insert and there isn't room, split the node, then
	 * decide which fragment to insert the new block from below into.
	 * Note that we may split the root this way, but we need more fixup.
	 */
	max = state->path.active - 1;
	ASSERT((max >= 0) && (max < XFS_DA_NODE_MAXDEPTH));
	ASSERT(state->path.blk[max].magic == XFS_ATTR_LEAF_MAGIC ||
	       state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC);

	addblk = &state->path.blk[max];		/* initial dummy value */
	for (i = max; (i >= 0) && addblk; state->path.active--, i--) {
		oldblk = &state->path.blk[i];
		newblk = &state->altpath.blk[i];

		/*
		 * If a leaf node then
		 *     Allocate a new leaf node, then rebalance across them.
		 * else if an intermediate node then
		 *     We split on the last layer, must we split the node?
		 */
		switch (oldblk->magic) {
		case XFS_ATTR_LEAF_MAGIC:
			error = xfs_attr_leaf_split(state, oldblk, newblk);
			if ((error != 0) && (error != ENOSPC)) {
				return(error);	/* GROT: attr is inconsistent */
			}
			if (!error) {
				addblk = newblk;
				break;
			}
			/*
			 * Entry wouldn't fit, split the leaf again.
			 */
			state->extravalid = 1;
			if (state->inleaf) {
				state->extraafter = 0;	/* before newblk */
				trace_xfs_attr_leaf_split_before(state->args);
				error = xfs_attr_leaf_split(state, oldblk,
							    &state->extrablk);
			} else {
				state->extraafter = 1;	/* after newblk */
				trace_xfs_attr_leaf_split_after(state->args);
				error = xfs_attr_leaf_split(state, newblk,
							    &state->extrablk);
			}
			if (error)
				return(error);	/* GROT: attr inconsistent */
			addblk = newblk;
			break;
		case XFS_DIR2_LEAFN_MAGIC:
			error = xfs_dir2_leafn_split(state, oldblk, newblk);
			if (error)
				return error;
			addblk = newblk;
			break;
		case XFS_DA_NODE_MAGIC:
			error = xfs_da_node_split(state, oldblk, newblk, addblk,
							 max - i, &action);
			xfs_da_buf_done(addblk->bp);
			addblk->bp = NULL;
			if (error)
				return(error);	/* GROT: dir is inconsistent */
			/*
			 * Record the newly split block for the next time thru?
			 */
			if (action)
				addblk = newblk;
			else
				addblk = NULL;
			break;
		}

		/*
		 * Update the btree to show the new hashval for this child.
		 */
		xfs_da_fixhashpath(state, &state->path);
		/*
		 * If we won't need this block again, it's getting dropped
		 * from the active path by the loop control, so we need
		 * to mark it done now.
		 */
		if (i > 0 || !addblk)
			xfs_da_buf_done(oldblk->bp);
	}
	if (!addblk)
		return(0);

	/*
	 * Split the root node.
	 */
	ASSERT(state->path.active == 0);
	oldblk = &state->path.blk[0];
	error = xfs_da_root_split(state, oldblk, addblk);
	if (error) {
		xfs_da_buf_done(oldblk->bp);
		xfs_da_buf_done(addblk->bp);
		addblk->bp = NULL;
		return(error);	/* GROT: dir is inconsistent */
	}

	/*
	 * Update pointers to the node which used to be block 0 and
	 * just got bumped because of the addition of a new root node.
	 * There might be three blocks involved if a double split occurred,
	 * and the original block 0 could be at any position in the list.
	 */
	node = oldblk->bp->data;
	if (node->hdr.info.forw) {
		if (be32_to_cpu(node->hdr.info.forw) == addblk->blkno) {
			bp = addblk->bp;
		} else {
			ASSERT(state->extravalid);
			bp = state->extrablk.bp;
		}
		node = bp->data;
		node->hdr.info.back = cpu_to_be32(oldblk->blkno);
		xfs_da_log_buf(state->args->trans, bp,
		    XFS_DA_LOGRANGE(node, &node->hdr.info,
		    sizeof(node->hdr.info)));
	}
	node = oldblk->bp->data;
	if (node->hdr.info.back) {
		if (be32_to_cpu(node->hdr.info.back) == addblk->blkno) {
			bp = addblk->bp;
		} else {
			ASSERT(state->extravalid);
			bp = state->extrablk.bp;
		}
		node = bp->data;
		node->hdr.info.forw = cpu_to_be32(oldblk->blkno);
		xfs_da_log_buf(state->args->trans, bp,
		    XFS_DA_LOGRANGE(node, &node->hdr.info,
		    sizeof(node->hdr.info)));
	}
	xfs_da_buf_done(oldblk->bp);
	xfs_da_buf_done(addblk->bp);
	addblk->bp = NULL;
	return(0);
}
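
/*
 * Worked example of the loop above (editorial sketch): a full dir2 leaf at
 * the bottom of the path splits into oldblk/newblk; the loop then ascends
 * one level and inserts an entry for newblk via xfs_da_node_split().  If
 * that node was also full it splits too and its new sibling propagates up
 * as addblk; should the propagation reach level 0, xfs_da_root_split()
 * below copies the old root out of block 0 and builds a fresh two-entry
 * root in its place.
 */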
/*
 * Split the root.  We have to create a new root and point to the two
 * parts (the split old root) that we just created.  Copy block zero to
 * the EOF, extending the inode in process.
 */
STATIC int						/* error */
xfs_da_root_split(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
				 xfs_da_state_blk_t *blk2)
{
	xfs_da_intnode_t *node, *oldroot;
	xfs_da_args_t *args;
	xfs_dablk_t blkno;
	xfs_dabuf_t *bp;
	int error, size;
	xfs_inode_t *dp;
	xfs_trans_t *tp;
	xfs_mount_t *mp;
	xfs_dir2_leaf_t *leaf;

	trace_xfs_da_root_split(state->args);

	/*
	 * Copy the existing (incorrect) block from the root node position
	 * to a free space somewhere.
	 */
	args = state->args;
	ASSERT(args != NULL);
	error = xfs_da_grow_inode(args, &blkno);
	if (error)
		return(error);
	dp = args->dp;
	tp = args->trans;
	mp = state->mp;
	error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, args->whichfork);
	if (error)
		return(error);
	ASSERT(bp != NULL);
	node = bp->data;
	oldroot = blk1->bp->data;
	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)) {
		size = (int)((char *)&oldroot->btree[be16_to_cpu(oldroot->hdr.count)] -
			     (char *)oldroot);
	} else {
		ASSERT(oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
		leaf = (xfs_dir2_leaf_t *)oldroot;
		size = (int)((char *)&leaf->ents[be16_to_cpu(leaf->hdr.count)] -
			     (char *)leaf);
	}
	memcpy(node, oldroot, size);
	xfs_da_log_buf(tp, bp, 0, size - 1);
	xfs_da_buf_done(blk1->bp);
	blk1->bp = bp;
	blk1->blkno = blkno;

	/*
	 * Set up the new root node.
	 */
	error = xfs_da_node_create(args,
		(args->whichfork == XFS_DATA_FORK) ? mp->m_dirleafblk : 0,
		be16_to_cpu(node->hdr.level) + 1, &bp, args->whichfork);
	if (error)
		return(error);
	node = bp->data;
	node->btree[0].hashval = cpu_to_be32(blk1->hashval);
	node->btree[0].before = cpu_to_be32(blk1->blkno);
	node->btree[1].hashval = cpu_to_be32(blk2->hashval);
	node->btree[1].before = cpu_to_be32(blk2->blkno);
	node->hdr.count = cpu_to_be16(2);

#ifdef DEBUG
	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)) {
		ASSERT(blk1->blkno >= mp->m_dirleafblk &&
		       blk1->blkno < mp->m_dirfreeblk);
		ASSERT(blk2->blkno >= mp->m_dirleafblk &&
		       blk2->blkno < mp->m_dirfreeblk);
	}
#endif

	/* Header is already logged by xfs_da_node_create */
	xfs_da_log_buf(tp, bp,
		XFS_DA_LOGRANGE(node, node->btree,
			sizeof(xfs_da_node_entry_t) * 2));
	xfs_da_buf_done(bp);

	return(0);
}
/*
 * Split the node, rebalance, then add the new entry.
 */
STATIC int						/* error */
xfs_da_node_split(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
				 xfs_da_state_blk_t *newblk,
				 xfs_da_state_blk_t *addblk,
				 int treelevel, int *result)
{
	xfs_da_intnode_t *node;
	xfs_dablk_t blkno;
	int newcount, error;
	int useextra;

	trace_xfs_da_node_split(state->args);

	node = oldblk->bp->data;
	ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));

	/*
	 * With V2 dirs the extra block is data or freespace.
	 */
	useextra = state->extravalid && state->args->whichfork == XFS_ATTR_FORK;
	newcount = 1 + useextra;
	/*
	 * Do we have to split the node?
	 */
	if ((be16_to_cpu(node->hdr.count) + newcount) > state->node_ents) {
		/*
		 * Allocate a new node, add to the doubly linked chain of
		 * nodes, then move some of our excess entries into it.
		 */
		error = xfs_da_grow_inode(state->args, &blkno);
		if (error)
			return(error);	/* GROT: dir is inconsistent */

		error = xfs_da_node_create(state->args, blkno, treelevel,
					   &newblk->bp, state->args->whichfork);
		if (error)
			return(error);	/* GROT: dir is inconsistent */
		newblk->blkno = blkno;
		newblk->magic = XFS_DA_NODE_MAGIC;
		xfs_da_node_rebalance(state, oldblk, newblk);
		error = xfs_da_blk_link(state, oldblk, newblk);
		if (error)
			return(error);
		*result = 1;
	} else {
		*result = 0;
	}

	/*
	 * Insert the new entry(s) into the correct block
	 * (updating last hashval in the process).
	 *
	 * xfs_da_node_add() inserts BEFORE the given index,
	 * and as a result of using node_lookup_int() we always
	 * point to a valid entry (not after one), but a split
	 * operation always results in a new block whose hashvals
	 * FOLLOW the current block.
	 *
	 * If we had double-split op below us, then add the extra block too.
	 */
	node = oldblk->bp->data;
	if (oldblk->index <= be16_to_cpu(node->hdr.count)) {
		oldblk->index++;
		xfs_da_node_add(state, oldblk, addblk);
		if (useextra) {
			if (state->extraafter)
				oldblk->index++;
			xfs_da_node_add(state, oldblk, &state->extrablk);
			state->extravalid = 0;
		}
	} else {
		newblk->index++;
		xfs_da_node_add(state, newblk, addblk);
		if (useextra) {
			if (state->extraafter)
				newblk->index++;
			xfs_da_node_add(state, newblk, &state->extrablk);
			state->extravalid = 0;
		}
	}

	return(0);
}
/*
 * Balance the btree elements between two intermediate nodes,
 * usually one full and one empty.
 *
 * NOTE: if blk2 is empty, then it will get the upper half of blk1.
 */
STATIC void
xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
				     xfs_da_state_blk_t *blk2)
{
	xfs_da_intnode_t *node1, *node2, *tmpnode;
	xfs_da_node_entry_t *btree_s, *btree_d;
	int count, tmp;
	xfs_trans_t *tp;

	trace_xfs_da_node_rebalance(state->args);

	node1 = blk1->bp->data;
	node2 = blk2->bp->data;
	/*
	 * Figure out how many entries need to move, and in which direction.
	 * Swap the nodes around if that makes it simpler.
	 */
	if ((be16_to_cpu(node1->hdr.count) > 0) && (be16_to_cpu(node2->hdr.count) > 0) &&
	    ((be32_to_cpu(node2->btree[0].hashval) < be32_to_cpu(node1->btree[0].hashval)) ||
	     (be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval) <
	      be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval)))) {
		tmpnode = node1;
		node1 = node2;
		node2 = tmpnode;
	}
	ASSERT(node1->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	ASSERT(node2->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	count = (be16_to_cpu(node1->hdr.count) - be16_to_cpu(node2->hdr.count)) / 2;
	if (count == 0)
		return;
	tp = state->args->trans;
	/*
	 * Two cases: high-to-low and low-to-high.
	 */
	if (count > 0) {
		/*
		 * Move elements in node2 up to make a hole.
		 */
		if ((tmp = be16_to_cpu(node2->hdr.count)) > 0) {
			tmp *= (uint)sizeof(xfs_da_node_entry_t);
			btree_s = &node2->btree[0];
			btree_d = &node2->btree[count];
			memmove(btree_d, btree_s, tmp);
		}

		/*
		 * Move the req'd B-tree elements from high in node1 to
		 * low in node2.
		 */
		be16_add_cpu(&node2->hdr.count, count);
		tmp = count * (uint)sizeof(xfs_da_node_entry_t);
		btree_s = &node1->btree[be16_to_cpu(node1->hdr.count) - count];
		btree_d = &node2->btree[0];
		memcpy(btree_d, btree_s, tmp);
		be16_add_cpu(&node1->hdr.count, -count);
	} else {
		/*
		 * Move the req'd B-tree elements from low in node2 to
		 * high in node1.
		 */
		count = -count;
		tmp = count * (uint)sizeof(xfs_da_node_entry_t);
		btree_s = &node2->btree[0];
		btree_d = &node1->btree[be16_to_cpu(node1->hdr.count)];
		memcpy(btree_d, btree_s, tmp);
		be16_add_cpu(&node1->hdr.count, count);
		xfs_da_log_buf(tp, blk1->bp,
			XFS_DA_LOGRANGE(node1, btree_d, tmp));

		/*
		 * Move elements in node2 down to fill the hole.
		 */
		tmp  = be16_to_cpu(node2->hdr.count) - count;
		tmp *= (uint)sizeof(xfs_da_node_entry_t);
		btree_s = &node2->btree[count];
		btree_d = &node2->btree[0];
		memmove(btree_d, btree_s, tmp);
		be16_add_cpu(&node2->hdr.count, -count);
	}

	/*
	 * Log header of node 1 and all current bits of node 2.
	 */
	xfs_da_log_buf(tp, blk1->bp,
		XFS_DA_LOGRANGE(node1, &node1->hdr, sizeof(node1->hdr)));
	xfs_da_log_buf(tp, blk2->bp,
		XFS_DA_LOGRANGE(node2, &node2->hdr,
			sizeof(node2->hdr) +
			sizeof(node2->btree[0]) * be16_to_cpu(node2->hdr.count)));

	/*
	 * Record the last hashval from each block for upward propagation.
	 * (note: don't use the swapped node pointers)
	 */
	node1 = blk1->bp->data;
	node2 = blk2->bp->data;
	blk1->hashval = be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval);
	blk2->hashval = be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval);

	/*
	 * Adjust the expected index for insertion.
	 */
	if (blk1->index >= be16_to_cpu(node1->hdr.count)) {
		blk2->index = blk1->index - be16_to_cpu(node1->hdr.count);
		blk1->index = be16_to_cpu(node1->hdr.count) + 1;	/* make it invalid */
	}
}
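
/*
 * Example of the rebalance arithmetic above (editorial): if node1 holds
 * 100 entries and node2 is a freshly created empty sibling, count =
 * (100 - 0) / 2 = 50, so the upper 50 entries of node1 (the higher
 * hashvals) are copied to the front of node2, leaving both about half full.
 */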
/*
 * Add a new entry to an intermediate node.
 */
STATIC void
xfs_da_node_add(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
			       xfs_da_state_blk_t *newblk)
{
	xfs_da_intnode_t *node;
	xfs_da_node_entry_t *btree;
	int tmp;

	trace_xfs_da_node_add(state->args);

	node = oldblk->bp->data;
	ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	ASSERT((oldblk->index >= 0) && (oldblk->index <= be16_to_cpu(node->hdr.count)));
	ASSERT(newblk->blkno != 0);
	if (state->args->whichfork == XFS_DATA_FORK)
		ASSERT(newblk->blkno >= state->mp->m_dirleafblk &&
		       newblk->blkno < state->mp->m_dirfreeblk);

	/*
	 * We may need to make some room before we insert the new node.
	 */
	tmp = 0;
	btree = &node->btree[ oldblk->index ];
	if (oldblk->index < be16_to_cpu(node->hdr.count)) {
		tmp = (be16_to_cpu(node->hdr.count) - oldblk->index) * (uint)sizeof(*btree);
		memmove(btree + 1, btree, tmp);
	}
	btree->hashval = cpu_to_be32(newblk->hashval);
	btree->before = cpu_to_be32(newblk->blkno);
	xfs_da_log_buf(state->args->trans, oldblk->bp,
		XFS_DA_LOGRANGE(node, btree, tmp + sizeof(*btree)));
	be16_add_cpu(&node->hdr.count, 1);
	xfs_da_log_buf(state->args->trans, oldblk->bp,
		XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));

	/*
	 * Copy the last hash value from the oldblk to propagate upwards.
	 */
	oldblk->hashval = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1 ].hashval);
}
/*========================================================================
 * Routines used for shrinking the Btree.
 *========================================================================*/

/*
 * Deallocate an empty leaf node, remove it from its parent,
 * possibly deallocating that block, etc...
 */
int
xfs_da_join(xfs_da_state_t *state)
{
	xfs_da_state_blk_t *drop_blk, *save_blk;
	int action, error;

	trace_xfs_da_join(state->args);

	action = 0;
	error = 0;
	drop_blk = &state->path.blk[ state->path.active-1 ];
	save_blk = &state->altpath.blk[ state->path.active-1 ];
	ASSERT(state->path.blk[0].magic == XFS_DA_NODE_MAGIC);
	ASSERT(drop_blk->magic == XFS_ATTR_LEAF_MAGIC ||
	       drop_blk->magic == XFS_DIR2_LEAFN_MAGIC);

	/*
	 * Walk back up the tree joining/deallocating as necessary.
	 * When we stop dropping blocks, break out.
	 */
	for (  ; state->path.active >= 2; drop_blk--, save_blk--,
		 state->path.active--) {
		/*
		 * See if we can combine the block with a neighbor.
		 *   (action == 0) => no options, just leave
		 *   (action == 1) => coalesce, then unlink
		 *   (action == 2) => block empty, unlink it
		 */
		switch (drop_blk->magic) {
		case XFS_ATTR_LEAF_MAGIC:
			error = xfs_attr_leaf_toosmall(state, &action);
			if (error)
				return(error);
			if (action == 0)
				return(0);
			xfs_attr_leaf_unbalance(state, drop_blk, save_blk);
			break;
		case XFS_DIR2_LEAFN_MAGIC:
			error = xfs_dir2_leafn_toosmall(state, &action);
			if (error)
				return error;
			if (action == 0)
				return 0;
			xfs_dir2_leafn_unbalance(state, drop_blk, save_blk);
			break;
		case XFS_DA_NODE_MAGIC:
			/*
			 * Remove the offending node, fixup hashvals,
			 * check for a toosmall neighbor.
			 */
			xfs_da_node_remove(state, drop_blk);
			xfs_da_fixhashpath(state, &state->path);
			error = xfs_da_node_toosmall(state, &action);
			if (error)
				return(error);
			if (action == 0)
				return 0;
			xfs_da_node_unbalance(state, drop_blk, save_blk);
			break;
		}
		xfs_da_fixhashpath(state, &state->altpath);
		error = xfs_da_blk_unlink(state, drop_blk, save_blk);
		xfs_da_state_kill_altpath(state);
		if (error)
			return(error);
		error = xfs_da_shrink_inode(state->args, drop_blk->blkno,
							 drop_blk->bp);
		drop_blk->bp = NULL;
		if (error)
			return(error);
	}
	/*
	 * We joined all the way to the top.  If it turns out that
	 * we only have one entry in the root, make the child block
	 * the new root.
	 */
	xfs_da_node_remove(state, drop_blk);
	xfs_da_fixhashpath(state, &state->path);
	error = xfs_da_root_join(state, &state->path.blk[0]);
	return(error);
}
#ifdef	DEBUG
static void
xfs_da_blkinfo_onlychild_validate(struct xfs_da_blkinfo *blkinfo, __u16 level)
{
	__be16	magic = blkinfo->magic;

	if (level == 1) {
		ASSERT(magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
		       magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
	} else {
		ASSERT(magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	}
	ASSERT(!blkinfo->forw);
	ASSERT(!blkinfo->back);
}
#else	/* !DEBUG */
#define	xfs_da_blkinfo_onlychild_validate(blkinfo, level)
#endif	/* !DEBUG */
/*
 * We have only one entry in the root.  Copy the only remaining child of
 * the old root to block 0 as the new root node.
 */
STATIC int
xfs_da_root_join(xfs_da_state_t *state, xfs_da_state_blk_t *root_blk)
{
	xfs_da_intnode_t *oldroot;
	xfs_da_args_t *args;
	xfs_dablk_t child;
	xfs_dabuf_t *bp;
	int error;

	trace_xfs_da_root_join(state->args);

	args = state->args;
	ASSERT(args != NULL);
	ASSERT(root_blk->magic == XFS_DA_NODE_MAGIC);
	oldroot = root_blk->bp->data;
	ASSERT(oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	ASSERT(!oldroot->hdr.info.forw);
	ASSERT(!oldroot->hdr.info.back);

	/*
	 * If the root has more than one child, then don't do anything.
	 */
	if (be16_to_cpu(oldroot->hdr.count) > 1)
		return(0);

	/*
	 * Read in the (only) child block, then copy those bytes into
	 * the root block's buffer and free the original child block.
	 */
	child = be32_to_cpu(oldroot->btree[0].before);
	ASSERT(child != 0);
	error = xfs_da_read_buf(args->trans, args->dp, child, -1, &bp,
					     args->whichfork);
	if (error)
		return(error);
	ASSERT(bp != NULL);
	xfs_da_blkinfo_onlychild_validate(bp->data,
					be16_to_cpu(oldroot->hdr.level));

	memcpy(root_blk->bp->data, bp->data, state->blocksize);
	xfs_da_log_buf(args->trans, root_blk->bp, 0, state->blocksize - 1);
	error = xfs_da_shrink_inode(args, child, bp);
	return(error);
}
/*
 * Check a node block and its neighbors to see if the block should be
 * collapsed into one or the other neighbor.  Always keep the block
 * with the smaller block number.
 * If the current block is over 50% full, don't try to join it, return 0.
 * If the block is empty, fill in the state structure and return 2.
 * If it can be collapsed, fill in the state structure and return 1.
 * If nothing can be done, return 0.
 */
STATIC int
xfs_da_node_toosmall(xfs_da_state_t *state, int *action)
{
	xfs_da_intnode_t *node;
	xfs_da_state_blk_t *blk;
	xfs_da_blkinfo_t *info;
	int count, forward, error, retval, i;
	xfs_dablk_t blkno;
	xfs_dabuf_t *bp;

	/*
	 * Check for the degenerate case of the block being over 50% full.
	 * If so, it's not worth even looking to see if we might be able
	 * to coalesce with a sibling.
	 */
	blk = &state->path.blk[ state->path.active-1 ];
	info = blk->bp->data;
	ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	node = (xfs_da_intnode_t *)info;
	count = be16_to_cpu(node->hdr.count);
	if (count > (state->node_ents >> 1)) {
		*action = 0;	/* blk over 50%, don't try to join */
		return(0);	/* blk over 50%, don't try to join */
	}

	/*
	 * Check for the degenerate case of the block being empty.
	 * If the block is empty, we'll simply delete it, no need to
	 * coalesce it with a sibling block.  We choose (arbitrarily)
	 * to merge with the forward block unless it is NULL.
	 */
	if (count == 0) {
		/*
		 * Make altpath point to the block we want to keep and
		 * path point to the block we want to drop (this one).
		 */
		forward = (info->forw != 0);
		memcpy(&state->altpath, &state->path, sizeof(state->path));
		error = xfs_da_path_shift(state, &state->altpath, forward,
						 0, &retval);
		if (error)
			return(error);
		if (retval) {
			*action = 0;
		} else {
			*action = 2;
		}
		return(0);
	}

	/*
	 * Examine each sibling block to see if we can coalesce with
	 * at least 25% free space to spare.  We need to figure out
	 * whether to merge with the forward or the backward block.
	 * We prefer coalescing with the lower numbered sibling so as
	 * to shrink a directory over time.
	 */
	/* start with smaller blk num */
	forward = (be32_to_cpu(info->forw) < be32_to_cpu(info->back));
	for (i = 0; i < 2; forward = !forward, i++) {
		if (forward)
			blkno = be32_to_cpu(info->forw);
		else
			blkno = be32_to_cpu(info->back);
		if (blkno == 0)
			continue;
		error = xfs_da_read_buf(state->args->trans, state->args->dp,
					blkno, -1, &bp, state->args->whichfork);
		if (error)
			return(error);
		ASSERT(bp != NULL);

		node = (xfs_da_intnode_t *)info;
		count  = state->node_ents;
		count -= state->node_ents >> 2;
		count -= be16_to_cpu(node->hdr.count);
		node = bp->data;
		ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
		count -= be16_to_cpu(node->hdr.count);
		xfs_da_brelse(state->args->trans, bp);
		if (count >= 0)
			break;	/* fits with at least 25% to spare */
	}
	if (i >= 2) {
		*action = 0;
		return(0);
	}

	/*
	 * Make altpath point to the block we want to keep (the lower
	 * numbered block) and path point to the block we want to drop.
	 */
	memcpy(&state->altpath, &state->path, sizeof(state->path));
	if (blkno < blk->blkno) {
		error = xfs_da_path_shift(state, &state->altpath, forward,
						 0, &retval);
		if (error)
			return(error);
		if (retval) {
			*action = 0;
			return(0);
		}
	} else {
		error = xfs_da_path_shift(state, &state->path, forward,
						 0, &retval);
		if (error)
			return(error);
		if (retval) {
			*action = 0;
			return(0);
		}
	}
	*action = 1;
	return(0);
}
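
/*
 * Example of the "25% to spare" test above (editorial): with
 * state->node_ents == 64, a sibling is accepted only when
 * 64 - 16 - count(this block) - count(sibling) >= 0, i.e. only when the
 * merged block would be at most 75% full, leaving a quarter of the entry
 * slots free for future inserts.
 */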
/*
 * Walk back up the tree adjusting hash values as necessary,
 * when we stop making changes, return.
 */
void
xfs_da_fixhashpath(xfs_da_state_t *state, xfs_da_state_path_t *path)
{
	xfs_da_state_blk_t *blk;
	xfs_da_intnode_t *node;
	xfs_da_node_entry_t *btree;
	xfs_dahash_t lasthash=0;
	int level, count;

	level = path->active-1;
	blk = &path->blk[ level ];
	switch (blk->magic) {
	case XFS_ATTR_LEAF_MAGIC:
		lasthash = xfs_attr_leaf_lasthash(blk->bp, &count);
		if (count == 0)
			return;
		break;
	case XFS_DIR2_LEAFN_MAGIC:
		lasthash = xfs_dir2_leafn_lasthash(blk->bp, &count);
		if (count == 0)
			return;
		break;
	case XFS_DA_NODE_MAGIC:
		lasthash = xfs_da_node_lasthash(blk->bp, &count);
		if (count == 0)
			return;
		break;
	}
	for (blk--, level--; level >= 0; blk--, level--) {
		node = blk->bp->data;
		ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
		btree = &node->btree[ blk->index ];
		if (be32_to_cpu(btree->hashval) == lasthash)
			break;
		blk->hashval = lasthash;
		btree->hashval = cpu_to_be32(lasthash);
		xfs_da_log_buf(state->args->trans, blk->bp,
				  XFS_DA_LOGRANGE(node, btree, sizeof(*btree)));

		lasthash = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval);
	}
}
/*
 * Remove an entry from an intermediate node.
 */
STATIC void
xfs_da_node_remove(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk)
{
	xfs_da_intnode_t *node;
	xfs_da_node_entry_t *btree;
	int tmp;

	trace_xfs_da_node_remove(state->args);

	node = drop_blk->bp->data;
	ASSERT(drop_blk->index < be16_to_cpu(node->hdr.count));
	ASSERT(drop_blk->index >= 0);

	/*
	 * Copy over the offending entry, or just zero it out.
	 */
	btree = &node->btree[drop_blk->index];
	if (drop_blk->index < (be16_to_cpu(node->hdr.count)-1)) {
		tmp  = be16_to_cpu(node->hdr.count) - drop_blk->index - 1;
		tmp *= (uint)sizeof(xfs_da_node_entry_t);
		memmove(btree, btree + 1, tmp);
		xfs_da_log_buf(state->args->trans, drop_blk->bp,
		    XFS_DA_LOGRANGE(node, btree, tmp));
		btree = &node->btree[be16_to_cpu(node->hdr.count)-1];
	}
	memset((char *)btree, 0, sizeof(xfs_da_node_entry_t));
	xfs_da_log_buf(state->args->trans, drop_blk->bp,
	    XFS_DA_LOGRANGE(node, btree, sizeof(*btree)));
	be16_add_cpu(&node->hdr.count, -1);
	xfs_da_log_buf(state->args->trans, drop_blk->bp,
	    XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));

	/*
	 * Copy the last hash value from the block to propagate upwards.
	 */
	btree--;
	drop_blk->hashval = be32_to_cpu(btree->hashval);
}
/*
 * Unbalance the btree elements between two intermediate nodes,
 * move all Btree elements from one node into another.
 */
STATIC void
xfs_da_node_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
				     xfs_da_state_blk_t *save_blk)
{
	xfs_da_intnode_t *drop_node, *save_node;
	xfs_da_node_entry_t *btree;
	int tmp;
	xfs_trans_t *tp;

	trace_xfs_da_node_unbalance(state->args);

	drop_node = drop_blk->bp->data;
	save_node = save_blk->bp->data;
	ASSERT(drop_node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	ASSERT(save_node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	tp = state->args->trans;

	/*
	 * If the dying block has lower hashvals, then move all the
	 * elements in the remaining block up to make a hole.
	 */
	if ((be32_to_cpu(drop_node->btree[0].hashval) < be32_to_cpu(save_node->btree[ 0 ].hashval)) ||
	    (be32_to_cpu(drop_node->btree[be16_to_cpu(drop_node->hdr.count)-1].hashval) <
	     be32_to_cpu(save_node->btree[be16_to_cpu(save_node->hdr.count)-1].hashval)))
	{
		btree = &save_node->btree[be16_to_cpu(drop_node->hdr.count)];
		tmp = be16_to_cpu(save_node->hdr.count) * (uint)sizeof(xfs_da_node_entry_t);
		memmove(btree, &save_node->btree[0], tmp);
		btree = &save_node->btree[0];
		xfs_da_log_buf(tp, save_blk->bp,
			XFS_DA_LOGRANGE(save_node, btree,
				(be16_to_cpu(save_node->hdr.count) + be16_to_cpu(drop_node->hdr.count)) *
				sizeof(xfs_da_node_entry_t)));
	} else {
		btree = &save_node->btree[be16_to_cpu(save_node->hdr.count)];
		xfs_da_log_buf(tp, save_blk->bp,
			XFS_DA_LOGRANGE(save_node, btree,
				be16_to_cpu(drop_node->hdr.count) *
				sizeof(xfs_da_node_entry_t)));
	}

	/*
	 * Move all the B-tree elements from drop_blk to save_blk.
	 */
	tmp = be16_to_cpu(drop_node->hdr.count) * (uint)sizeof(xfs_da_node_entry_t);
	memcpy(btree, &drop_node->btree[0], tmp);
	be16_add_cpu(&save_node->hdr.count, be16_to_cpu(drop_node->hdr.count));

	xfs_da_log_buf(tp, save_blk->bp,
		XFS_DA_LOGRANGE(save_node, &save_node->hdr,
			sizeof(save_node->hdr)));

	/*
	 * Save the last hashval in the remaining block for upward propagation.
	 */
	save_blk->hashval = be32_to_cpu(save_node->btree[be16_to_cpu(save_node->hdr.count)-1].hashval);
}
/*========================================================================
 * Routines used for finding things in the Btree.
 *========================================================================*/

/*
 * Walk down the Btree looking for a particular filename, filling
 * in the state structure as we go.
 *
 * We will set the state structure to point to each of the elements
 * in each of the nodes where either the hashval is or should be.
 *
 * We support duplicate hashval's so for each entry in the current
 * node that could contain the desired hashval, descend.  This is a
 * pruned depth-first tree search.
 */
int							/* error */
xfs_da_node_lookup_int(xfs_da_state_t *state, int *result)
{
	xfs_da_state_blk_t *blk;
	xfs_da_blkinfo_t *curr;
	xfs_da_intnode_t *node;
	xfs_da_node_entry_t *btree;
	xfs_dablk_t blkno;
	int probe, span, max, error, retval;
	xfs_dahash_t hashval, btreehashval;
	xfs_da_args_t *args;

	args = state->args;

	/*
	 * Descend thru the B-tree searching each level for the right
	 * node to use, until the right hashval is found.
	 */
	blkno = (args->whichfork == XFS_DATA_FORK)? state->mp->m_dirleafblk : 0;
	for (blk = &state->path.blk[0], state->path.active = 1;
			 state->path.active <= XFS_DA_NODE_MAXDEPTH;
			 blk++, state->path.active++) {
		/*
		 * Read the next node down in the tree.
		 */
		blk->blkno = blkno;
		error = xfs_da_read_buf(args->trans, args->dp, blkno,
					-1, &blk->bp, args->whichfork);
		if (error) {
			blk->blkno = 0;
			state->path.active--;
			return(error);
		}
		curr = blk->bp->data;
		blk->magic = be16_to_cpu(curr->magic);
		ASSERT(blk->magic == XFS_DA_NODE_MAGIC ||
		       blk->magic == XFS_DIR2_LEAFN_MAGIC ||
		       blk->magic == XFS_ATTR_LEAF_MAGIC);

		/*
		 * Search an intermediate node for a match.
		 */
		if (blk->magic == XFS_DA_NODE_MAGIC) {
			node = blk->bp->data;
			max = be16_to_cpu(node->hdr.count);
			blk->hashval = be32_to_cpu(node->btree[max-1].hashval);

			/*
			 * Binary search.  (note: small blocks will skip loop)
			 */
			probe = span = max / 2;
			hashval = args->hashval;
			for (btree = &node->btree[probe]; span > 4;
				   btree = &node->btree[probe]) {
				span /= 2;
				btreehashval = be32_to_cpu(btree->hashval);
				if (btreehashval < hashval)
					probe += span;
				else if (btreehashval > hashval)
					probe -= span;
				else
					break;
			}
			ASSERT((probe >= 0) && (probe < max));
			ASSERT((span <= 4) || (be32_to_cpu(btree->hashval) == hashval));

			/*
			 * Since we may have duplicate hashval's, find the first
			 * matching hashval in the node.
			 */
			while ((probe > 0) && (be32_to_cpu(btree->hashval) >= hashval)) {
				btree--;
				probe--;
			}
			while ((probe < max) && (be32_to_cpu(btree->hashval) < hashval)) {
				btree++;
				probe++;
			}

			/*
			 * Pick the right block to descend on.
			 */
			if (probe == max) {
				blk->index = max-1;
				blkno = be32_to_cpu(node->btree[max-1].before);
			} else {
				blk->index = probe;
				blkno = be32_to_cpu(btree->before);
			}
		} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
			blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
			break;
		} else if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
			blk->hashval = xfs_dir2_leafn_lasthash(blk->bp, NULL);
			break;
		}
	}

	/*
	 * A leaf block that ends in the hashval that we are interested in
	 * (final hashval == search hashval) means that the next block may
	 * contain more entries with the same hashval, shift upward to the
	 * next leaf and keep searching.
	 */
	for (;;) {
		if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
			retval = xfs_dir2_leafn_lookup_int(blk->bp, args,
							&blk->index, state);
		} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
			retval = xfs_attr_leaf_lookup_int(blk->bp, args);
			blk->index = args->index;
			args->blkno = blk->blkno;
		} else {
			return XFS_ERROR(EFSCORRUPTED);
		}
		if (((retval == ENOENT) || (retval == ENOATTR)) &&
		    (blk->hashval == args->hashval)) {
			error = xfs_da_path_shift(state, &state->path, 1, 1,
							 &retval);
			if (error)
				return(error);
			if (retval == 0) {
				continue;
			} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
				/* path_shift() gives ENOENT */
				retval = XFS_ERROR(ENOATTR);
			}
		}
		break;
	}
	*result = retval;
	return(0);
}
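
/*
 * Note on the lookup above (editorial): the binary search stops once the
 * span shrinks to four entries or fewer, and the two linear while-loops
 * then back up to the *first* entry with a matching hashval.  That is what
 * makes duplicate hashvals safe: with entries hashed 10,20,20,20,30 and a
 * search for 20, probe lands somewhere inside the run of 20s and is walked
 * back to the leftmost one before descending.
 */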
/*========================================================================
 * Utility routines.
 *========================================================================*/

/*
 * Link a new block into a doubly linked list of blocks (of whatever type).
 */
int							/* error */
xfs_da_blk_link(xfs_da_state_t *state, xfs_da_state_blk_t *old_blk,
	       xfs_da_state_blk_t *new_blk)
{
	xfs_da_blkinfo_t *old_info, *new_info, *tmp_info;
	xfs_da_args_t *args;
	int before=0, error;
	xfs_dabuf_t *bp;

	/*
	 * Set up environment.
	 */
	args = state->args;
	ASSERT(args != NULL);
	old_info = old_blk->bp->data;
	new_info = new_blk->bp->data;
	ASSERT(old_blk->magic == XFS_DA_NODE_MAGIC ||
	       old_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
	       old_blk->magic == XFS_ATTR_LEAF_MAGIC);
	ASSERT(old_blk->magic == be16_to_cpu(old_info->magic));
	ASSERT(new_blk->magic == be16_to_cpu(new_info->magic));
	ASSERT(old_blk->magic == new_blk->magic);

	switch (old_blk->magic) {
	case XFS_ATTR_LEAF_MAGIC:
		before = xfs_attr_leaf_order(old_blk->bp, new_blk->bp);
		break;
	case XFS_DIR2_LEAFN_MAGIC:
		before = xfs_dir2_leafn_order(old_blk->bp, new_blk->bp);
		break;
	case XFS_DA_NODE_MAGIC:
		before = xfs_da_node_order(old_blk->bp, new_blk->bp);
		break;
	}

	/*
	 * Link blocks in appropriate order.
	 */
	if (before) {
		/*
		 * Link new block in before existing block.
		 */
		trace_xfs_da_link_before(args);
		new_info->forw = cpu_to_be32(old_blk->blkno);
		new_info->back = old_info->back;
		if (old_info->back) {
			error = xfs_da_read_buf(args->trans, args->dp,
						be32_to_cpu(old_info->back),
						-1, &bp, args->whichfork);
			if (error)
				return(error);
			ASSERT(bp != NULL);
			tmp_info = bp->data;
			ASSERT(be16_to_cpu(tmp_info->magic) == be16_to_cpu(old_info->magic));
			ASSERT(be32_to_cpu(tmp_info->forw) == old_blk->blkno);
			tmp_info->forw = cpu_to_be32(new_blk->blkno);
			xfs_da_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
			xfs_da_buf_done(bp);
		}
		old_info->back = cpu_to_be32(new_blk->blkno);
	} else {
		/*
		 * Link new block in after existing block.
		 */
		trace_xfs_da_link_after(args);
		new_info->forw = old_info->forw;
		new_info->back = cpu_to_be32(old_blk->blkno);
		if (old_info->forw) {
			error = xfs_da_read_buf(args->trans, args->dp,
						be32_to_cpu(old_info->forw),
						-1, &bp, args->whichfork);
			if (error)
				return(error);
			ASSERT(bp != NULL);
			tmp_info = bp->data;
			ASSERT(tmp_info->magic == old_info->magic);
			ASSERT(be32_to_cpu(tmp_info->back) == old_blk->blkno);
			tmp_info->back = cpu_to_be32(new_blk->blkno);
			xfs_da_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
			xfs_da_buf_done(bp);
		}
		old_info->forw = cpu_to_be32(new_blk->blkno);
	}

	xfs_da_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1);
	xfs_da_log_buf(args->trans, new_blk->bp, 0, sizeof(*tmp_info) - 1);
	return(0);
}
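
/*
 * Illustration of the "link before" case above (editorial):
 *
 *	before:   ... <-> A <-> C <-> ...	(A == old_info->back)
 *	after:    ... <-> A <-> B <-> C <-> ...
 *
 * B (new_blk) takes C's old back pointer, A's forw is re-aimed at B, and
 * C's back is re-aimed at B; each touched block is logged separately since
 * the three blocks live in three different buffers.
 */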
/*
 * Compare two intermediate nodes for "order".
 */
STATIC int
xfs_da_node_order(xfs_dabuf_t *node1_bp, xfs_dabuf_t *node2_bp)
{
	xfs_da_intnode_t *node1, *node2;

	node1 = node1_bp->data;
	node2 = node2_bp->data;
	ASSERT(node1->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC) &&
	       node2->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	if ((be16_to_cpu(node1->hdr.count) > 0) && (be16_to_cpu(node2->hdr.count) > 0) &&
	    ((be32_to_cpu(node2->btree[0].hashval) <
	      be32_to_cpu(node1->btree[0].hashval)) ||
	     (be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval) <
	      be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval)))) {
		return(1);
	}
	return(0);
}
/*
 * Pick up the last hashvalue from an intermediate node.
 */
STATIC uint
xfs_da_node_lasthash(xfs_dabuf_t *bp, int *count)
{
	xfs_da_intnode_t *node;

	node = bp->data;
	ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	if (count)
		*count = be16_to_cpu(node->hdr.count);
	if (!node->hdr.count)
		return(0);
	return be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval);
}
/*
 * Unlink a block from a doubly linked list of blocks.
 */
STATIC int						/* error */
xfs_da_blk_unlink(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
				 xfs_da_state_blk_t *save_blk)
{
	xfs_da_blkinfo_t *drop_info, *save_info, *tmp_info;
	xfs_da_args_t *args;
	xfs_dabuf_t *bp;
	int error;

	/*
	 * Set up environment.
	 */
	args = state->args;
	ASSERT(args != NULL);
	save_info = save_blk->bp->data;
	drop_info = drop_blk->bp->data;
	ASSERT(save_blk->magic == XFS_DA_NODE_MAGIC ||
	       save_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
	       save_blk->magic == XFS_ATTR_LEAF_MAGIC);
	ASSERT(save_blk->magic == be16_to_cpu(save_info->magic));
	ASSERT(drop_blk->magic == be16_to_cpu(drop_info->magic));
	ASSERT(save_blk->magic == drop_blk->magic);
	ASSERT((be32_to_cpu(save_info->forw) == drop_blk->blkno) ||
	       (be32_to_cpu(save_info->back) == drop_blk->blkno));
	ASSERT((be32_to_cpu(drop_info->forw) == save_blk->blkno) ||
	       (be32_to_cpu(drop_info->back) == save_blk->blkno));

	/*
	 * Unlink the leaf block from the doubly linked chain of leaves.
	 */
	if (be32_to_cpu(save_info->back) == drop_blk->blkno) {
		trace_xfs_da_unlink_back(args);
		save_info->back = drop_info->back;
		if (drop_info->back) {
			error = xfs_da_read_buf(args->trans, args->dp,
						be32_to_cpu(drop_info->back),
						-1, &bp, args->whichfork);
			if (error)
				return(error);
			ASSERT(bp != NULL);
			tmp_info = bp->data;
			ASSERT(tmp_info->magic == save_info->magic);
			ASSERT(be32_to_cpu(tmp_info->forw) == drop_blk->blkno);
			tmp_info->forw = cpu_to_be32(save_blk->blkno);
			xfs_da_log_buf(args->trans, bp, 0,
						    sizeof(*tmp_info) - 1);
			xfs_da_buf_done(bp);
		}
	} else {
		trace_xfs_da_unlink_forward(args);
		save_info->forw = drop_info->forw;
		if (drop_info->forw) {
			error = xfs_da_read_buf(args->trans, args->dp,
						be32_to_cpu(drop_info->forw),
						-1, &bp, args->whichfork);
			if (error)
				return(error);
			ASSERT(bp != NULL);
			tmp_info = bp->data;
			ASSERT(tmp_info->magic == save_info->magic);
			ASSERT(be32_to_cpu(tmp_info->back) == drop_blk->blkno);
			tmp_info->back = cpu_to_be32(save_blk->blkno);
			xfs_da_log_buf(args->trans, bp, 0,
						    sizeof(*tmp_info) - 1);
			xfs_da_buf_done(bp);
		}
	}

	xfs_da_log_buf(args->trans, save_blk->bp, 0, sizeof(*save_info) - 1);
	return(0);
}
/*
 * Move a path "forward" or "!forward" one block at the current level.
 *
 * This routine will adjust a "path" to point to the next block
 * "forward" (higher hashvalues) or "!forward" (lower hashvals) in the
 * Btree, including updating pointers to the intermediate nodes between
 * the new bottom and the root.
 */
int							/* error */
xfs_da_path_shift(xfs_da_state_t *state, xfs_da_state_path_t *path,
				 int forward, int release, int *result)
{
	xfs_da_state_blk_t *blk;
	xfs_da_blkinfo_t *info;
	xfs_da_intnode_t *node;
	xfs_da_args_t *args;
	xfs_dablk_t blkno=0;
	int level, error;

	/*
	 * Roll up the Btree looking for the first block where our
	 * current index is not at the edge of the block.  Note that
	 * we skip the bottom layer because we want the sibling block.
	 */
	args = state->args;
	ASSERT(args != NULL);
	ASSERT(path != NULL);
	ASSERT((path->active > 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
	level = (path->active-1) - 1;	/* skip bottom layer in path */
	for (blk = &path->blk[level]; level >= 0; blk--, level--) {
		ASSERT(blk->bp != NULL);
		node = blk->bp->data;
		ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
		if (forward && (blk->index < be16_to_cpu(node->hdr.count)-1)) {
			blk->index++;
			blkno = be32_to_cpu(node->btree[blk->index].before);
			break;
		} else if (!forward && (blk->index > 0)) {
			blk->index--;
			blkno = be32_to_cpu(node->btree[blk->index].before);
			break;
		}
	}
	if (level < 0) {
		*result = XFS_ERROR(ENOENT);	/* we're out of our tree */
		ASSERT(args->op_flags & XFS_DA_OP_OKNOENT);
		return(0);
	}

	/*
	 * Roll down the edge of the subtree until we reach the
	 * same depth we were at originally.
	 */
	for (blk++, level++; level < path->active; blk++, level++) {
		/*
		 * Release the old block.
		 * (if it's dirty, trans won't actually let go)
		 */
		if (release)
			xfs_da_brelse(args->trans, blk->bp);

		/*
		 * Read the next child block.
		 */
		blk->blkno = blkno;
		error = xfs_da_read_buf(args->trans, args->dp, blkno, -1,
					&blk->bp, args->whichfork);
		if (error)
			return(error);
		ASSERT(blk->bp != NULL);
		info = blk->bp->data;
		ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
		blk->magic = be16_to_cpu(info->magic);
		if (blk->magic == XFS_DA_NODE_MAGIC) {
			node = (xfs_da_intnode_t *)info;
			blk->hashval = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval);
			if (forward)
				blk->index = 0;
			else
				blk->index = be16_to_cpu(node->hdr.count)-1;
			blkno = be32_to_cpu(node->btree[blk->index].before);
		} else {
			ASSERT(level == path->active-1);
			blk->index = 0;
			switch(blk->magic) {
			case XFS_ATTR_LEAF_MAGIC:
				blk->hashval = xfs_attr_leaf_lasthash(blk->bp,
								      NULL);
				break;
			case XFS_DIR2_LEAFN_MAGIC:
				blk->hashval = xfs_dir2_leafn_lasthash(blk->bp,
								       NULL);
				break;
			default:
				ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC ||
				       blk->magic == XFS_DIR2_LEAFN_MAGIC);
				break;
			}
		}
	}
	*result = 0;
	return(0);
}
/*========================================================================
 * Utility routines.
 *========================================================================*/

/*
 * Implement a simple hash on a character string.
 * Rotate the hash value by 7 bits, then XOR each character in.
 * This is implemented with some source-level loop unrolling.
 */
xfs_dahash_t
xfs_da_hashname(const __uint8_t *name, int namelen)
{
	xfs_dahash_t hash;

	/*
	 * Do four characters at a time as long as we can.
	 */
	for (hash = 0; namelen >= 4; namelen -= 4, name += 4)
		hash = (name[0] << 21) ^ (name[1] << 14) ^ (name[2] << 7) ^
		       (name[3] << 0) ^ rol32(hash, 7 * 4);

	/*
	 * Now do the rest of the characters.
	 */
	switch (namelen) {
	case 3:
		return (name[0] << 14) ^ (name[1] << 7) ^ (name[2] << 0) ^
		       rol32(hash, 7 * 3);
	case 2:
		return (name[0] << 7) ^ (name[1] << 0) ^ rol32(hash, 7 * 2);
	case 1:
		return (name[0] << 0) ^ rol32(hash, 7 * 1);
	default: /* case 0: */
		return hash;
	}
}
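
/*
 * Worked example (editorial): hashing the 5-byte name "abcde".  The first
 * pass of the unrolled loop folds "abcd" into an empty hash:
 *
 *	hash = ('a' << 21) ^ ('b' << 14) ^ ('c' << 7) ^ ('d' << 0)
 *
 * (rol32(0, 28) contributes nothing).  The trailing 'e' then takes the
 * "case 1" arm:
 *
 *	return ('e' << 0) ^ rol32(hash, 7 * 1);
 */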
enum xfs_dacmp
xfs_da_compname(
	struct xfs_da_args *args,
	const unsigned char *name,
	int		len)
{
	return (args->namelen == len && memcmp(args->name, name, len) == 0) ?
					XFS_CMP_EXACT : XFS_CMP_DIFFERENT;
}
static xfs_dahash_t
xfs_default_hashname(
	struct xfs_name	*name)
{
	return xfs_da_hashname(name->name, name->len);
}

const struct xfs_nameops xfs_default_nameops = {
	.hashname	= xfs_default_hashname,
	.compname	= xfs_da_compname
};
int
xfs_da_grow_inode_int(
	struct xfs_da_args	*args,
	xfs_fileoff_t		*bno,
	int			count)
{
	struct xfs_trans	*tp = args->trans;
	struct xfs_inode	*dp = args->dp;
	int			w = args->whichfork;
	xfs_drfsbno_t		nblks = dp->i_d.di_nblocks;
	struct xfs_bmbt_irec	map, *mapp;
	int			nmap, error, got, i, mapi;

	/*
	 * Find a spot in the file space to put the new block.
	 */
	error = xfs_bmap_first_unused(tp, dp, count, bno, w);
	if (error)
		return error;

	/*
	 * Try mapping it in one filesystem block.
	 */
	nmap = 1;
	ASSERT(args->firstblock != NULL);
	error = xfs_bmapi_write(tp, dp, *bno, count,
			xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA|XFS_BMAPI_CONTIG,
			args->firstblock, args->total, &map, &nmap,
			args->flist);
	if (error)
		return error;

	ASSERT(nmap <= 1);
	if (nmap == 1) {
		mapp = &map;
		mapi = 1;
	} else if (nmap == 0 && count > 1) {
		xfs_fileoff_t		b;
		int			c;

		/*
		 * If we didn't get it and the block might work if fragmented,
		 * try without the CONTIG flag.  Loop until we get it all.
		 */
		mapp = kmem_alloc(sizeof(*mapp) * count, KM_SLEEP);
		for (b = *bno, mapi = 0; b < *bno + count; ) {
			nmap = MIN(XFS_BMAP_MAX_NMAP, count);
			c = (int)(*bno + count - b);
			error = xfs_bmapi_write(tp, dp, b, c,
					xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
					args->firstblock, args->total,
					&mapp[mapi], &nmap, args->flist);
			if (error)
				goto out_free_map;
			if (nmap < 1)
				break;
			mapi += nmap;
			b = mapp[mapi - 1].br_startoff +
			    mapp[mapi - 1].br_blockcount;
		}
	} else {
		mapi = 0;
		mapp = NULL;
	}

	/*
	 * Count the blocks we got, make sure it matches the total.
	 */
	for (i = 0, got = 0; i < mapi; i++)
		got += mapp[i].br_blockcount;
	if (got != count || mapp[0].br_startoff != *bno ||
	    mapp[mapi - 1].br_startoff + mapp[mapi - 1].br_blockcount !=
	    *bno + count) {
		error = XFS_ERROR(ENOSPC);
		goto out_free_map;
	}

	/* account for newly allocated blocks in reserved blocks total */
	args->total -= dp->i_d.di_nblocks - nblks;

out_free_map:
	if (mapp != &map)
		kmem_free(mapp);
	return error;
}
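
/*
 * Example (editorial): for a directory with 8k dir blocks on a 4k-block
 * filesystem, count == m_dirblkfsbs == 2, so the first xfs_bmapi_write()
 * above asks for both fsbs with XFS_BMAPI_CONTIG.  Only if that returns no
 * mapping does the fallback loop allocate the range piecemeal without the
 * CONTIG flag and then verify that the fragments tile [*bno, *bno + count)
 * exactly, failing with ENOSPC otherwise.
 */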
/*
 * Add a block to the btree ahead of the file.
 * Return the new block number to the caller.
 */
int
xfs_da_grow_inode(
	struct xfs_da_args	*args,
	xfs_dablk_t		*new_blkno)
{
	xfs_fileoff_t		bno;
	int			count;
	int			error;

	trace_xfs_da_grow_inode(args);

	if (args->whichfork == XFS_DATA_FORK) {
		bno = args->dp->i_mount->m_dirleafblk;
		count = args->dp->i_mount->m_dirblkfsbs;
	} else {
		bno = 0;
		count = 1;
	}

	error = xfs_da_grow_inode_int(args, &bno, count);
	if (!error)
		*new_blkno = (xfs_dablk_t)bno;
	return error;
}
/*
 * Ick.  We need to always be able to remove a btree block, even
 * if there's no space reservation because the filesystem is full.
 * This is called if xfs_bunmapi on a btree block fails due to ENOSPC.
 * It swaps the target block with the last block in the file.  The
 * last block in the file can always be removed since it can't cause
 * a bmap btree split to do that.
 */
STATIC int
xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
		      xfs_dabuf_t **dead_bufp)
{
	xfs_dablk_t dead_blkno, last_blkno, sib_blkno, par_blkno;
	xfs_dabuf_t *dead_buf, *last_buf, *sib_buf, *par_buf;
	xfs_fileoff_t lastoff;
	xfs_inode_t *ip;
	xfs_trans_t *tp;
	xfs_mount_t *mp;
	int error, w, entno, level, dead_level;
	xfs_da_blkinfo_t *dead_info, *sib_info;
	xfs_da_intnode_t *par_node, *dead_node;
	xfs_dir2_leaf_t *dead_leaf2;
	xfs_dahash_t dead_hash;

	trace_xfs_da_swap_lastblock(args);

	dead_buf = *dead_bufp;
	dead_blkno = *dead_blknop;
	tp = args->trans;
	ip = args->dp;
	w = args->whichfork;
	ASSERT(w == XFS_DATA_FORK);
	mp = ip->i_mount;
	lastoff = mp->m_dirfreeblk;
	error = xfs_bmap_last_before(tp, ip, &lastoff, w);
	if (error)
		return error;
	if (unlikely(lastoff == 0)) {
		XFS_ERROR_REPORT("xfs_da_swap_lastblock(1)", XFS_ERRLEVEL_LOW,
				 mp);
		return XFS_ERROR(EFSCORRUPTED);
	}
	/*
	 * Read the last block in the btree space.
	 */
	last_blkno = (xfs_dablk_t)lastoff - mp->m_dirblkfsbs;
	if ((error = xfs_da_read_buf(tp, ip, last_blkno, -1, &last_buf, w)))
		return error;
	/*
	 * Copy the last block into the dead buffer and log it.
	 */
	memcpy(dead_buf->data, last_buf->data, mp->m_dirblksize);
	xfs_da_log_buf(tp, dead_buf, 0, mp->m_dirblksize - 1);
	dead_info = dead_buf->data;
	/*
	 * Get values from the moved block.
	 */
	if (dead_info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)) {
		dead_leaf2 = (xfs_dir2_leaf_t *)dead_info;
		dead_level = 0;
		dead_hash = be32_to_cpu(dead_leaf2->ents[be16_to_cpu(dead_leaf2->hdr.count) - 1].hashval);
	} else {
		ASSERT(dead_info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
		dead_node = (xfs_da_intnode_t *)dead_info;
		dead_level = be16_to_cpu(dead_node->hdr.level);
		dead_hash = be32_to_cpu(dead_node->btree[be16_to_cpu(dead_node->hdr.count) - 1].hashval);
	}
	sib_buf = par_buf = NULL;
	/*
	 * If the moved block has a left sibling, fix up the pointers.
	 */
	if ((sib_blkno = be32_to_cpu(dead_info->back))) {
		if ((error = xfs_da_read_buf(tp, ip, sib_blkno, -1, &sib_buf, w)))
			goto done;
		sib_info = sib_buf->data;
		if (unlikely(
		    be32_to_cpu(sib_info->forw) != last_blkno ||
		    sib_info->magic != dead_info->magic)) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(2)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
		sib_info->forw = cpu_to_be32(dead_blkno);
		xfs_da_log_buf(tp, sib_buf,
			XFS_DA_LOGRANGE(sib_info, &sib_info->forw,
					sizeof(sib_info->forw)));
		xfs_da_buf_done(sib_buf);
		sib_buf = NULL;
	}
	/*
	 * If the moved block has a right sibling, fix up the pointers.
	 */
	if ((sib_blkno = be32_to_cpu(dead_info->forw))) {
		if ((error = xfs_da_read_buf(tp, ip, sib_blkno, -1, &sib_buf, w)))
			goto done;
		sib_info = sib_buf->data;
		if (unlikely(
		    be32_to_cpu(sib_info->back) != last_blkno ||
		    sib_info->magic != dead_info->magic)) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(3)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
		sib_info->back = cpu_to_be32(dead_blkno);
		xfs_da_log_buf(tp, sib_buf,
			XFS_DA_LOGRANGE(sib_info, &sib_info->back,
					sizeof(sib_info->back)));
		xfs_da_buf_done(sib_buf);
		sib_buf = NULL;
	}
	par_blkno = mp->m_dirleafblk;
	level = -1;
	/*
	 * Walk down the tree looking for the parent of the moved block.
	 */
	for (;;) {
		if ((error = xfs_da_read_buf(tp, ip, par_blkno, -1, &par_buf, w)))
			goto done;
		par_node = par_buf->data;
		if (unlikely(par_node->hdr.info.magic !=
		    cpu_to_be16(XFS_DA_NODE_MAGIC) ||
		    (level >= 0 && level != be16_to_cpu(par_node->hdr.level) + 1))) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(4)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
		level = be16_to_cpu(par_node->hdr.level);
		for (entno = 0;
		     entno < be16_to_cpu(par_node->hdr.count) &&
		     be32_to_cpu(par_node->btree[entno].hashval) < dead_hash;
		     entno++)
			continue;
		if (unlikely(entno == be16_to_cpu(par_node->hdr.count))) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(5)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
		par_blkno = be32_to_cpu(par_node->btree[entno].before);
		if (level == dead_level + 1)
			break;
		xfs_da_brelse(tp, par_buf);
		par_buf = NULL;
	}
	/*
	 * We're in the right parent block.
	 * Look for the right entry.
	 */
	for (;;) {
		for (;
		     entno < be16_to_cpu(par_node->hdr.count) &&
		     be32_to_cpu(par_node->btree[entno].before) != last_blkno;
		     entno++)
			continue;
		if (entno < be16_to_cpu(par_node->hdr.count))
			break;
		par_blkno = be32_to_cpu(par_node->hdr.info.forw);
		xfs_da_brelse(tp, par_buf);
		par_buf = NULL;
		if (unlikely(par_blkno == 0)) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(6)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
		if ((error = xfs_da_read_buf(tp, ip, par_blkno, -1, &par_buf, w)))
			goto done;
		par_node = par_buf->data;
		if (unlikely(
		    be16_to_cpu(par_node->hdr.level) != level ||
		    par_node->hdr.info.magic != cpu_to_be16(XFS_DA_NODE_MAGIC))) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(7)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
		entno = 0;
	}
	/*
	 * Update the parent entry pointing to the moved block.
	 */
	par_node->btree[entno].before = cpu_to_be32(dead_blkno);
	xfs_da_log_buf(tp, par_buf,
		XFS_DA_LOGRANGE(par_node, &par_node->btree[entno].before,
				sizeof(par_node->btree[entno].before)));
	xfs_da_buf_done(par_buf);
	xfs_da_buf_done(dead_buf);
	*dead_blknop = last_blkno;
	*dead_bufp = last_buf;
	return 0;
done:
	if (par_buf)
		xfs_da_brelse(tp, par_buf);
	if (sib_buf)
		xfs_da_brelse(tp, sib_buf);
	xfs_da_brelse(tp, last_buf);
	return error;
}
/*
 * Remove a btree block from a directory or attribute.
 */
int
xfs_da_shrink_inode(xfs_da_args_t *args, xfs_dablk_t dead_blkno,
		    xfs_dabuf_t *dead_buf)
{
	xfs_inode_t *dp;
	int done, error, w, count;
	xfs_trans_t *tp;
	xfs_mount_t *mp;

	trace_xfs_da_shrink_inode(args);

	dp = args->dp;
	w = args->whichfork;
	tp = args->trans;
	mp = dp->i_mount;
	if (w == XFS_DATA_FORK)
		count = mp->m_dirblkfsbs;
	else
		count = 1;
	for (;;) {
		/*
		 * Remove extents.  If we get ENOSPC for a dir we have to move
		 * the last block to the place we want to kill.
		 */
		if ((error = xfs_bunmapi(tp, dp, dead_blkno, count,
				xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
				0, args->firstblock, args->flist,
				&done)) == ENOSPC) {
			if (w != XFS_DATA_FORK)
				break;
			if ((error = xfs_da_swap_lastblock(args, &dead_blkno,
					&dead_buf)))
				break;
		} else {
			break;
		}
	}
	xfs_da_binval(tp, dead_buf);
	return error;
}
/*
 * See if the mapping(s) for this btree block are valid, i.e.
 * don't contain holes, are logically contiguous, and cover the whole range.
 */
STATIC int
xfs_da_map_covers_blocks(
	int		nmap,
	xfs_bmbt_irec_t	*mapp,
	xfs_dablk_t	bno,
	int		count)
{
	int		i;
	int		off;

	for (i = 0, off = bno; i < nmap; i++) {
		if (mapp[i].br_startblock == HOLESTARTBLOCK ||
		    mapp[i].br_startblock == DELAYSTARTBLOCK) {
			return 0;
		}
		if (off != mapp[i].br_startoff) {
			return 0;
		}
		off += mapp[i].br_blockcount;
	}
	return off == bno + count;
}
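
/*
 * Example (editorial): for bno = 0, count = 4, the mappings
 * {off 0, len 2} and {off 2, len 2} tile the range and return 1, while
 * {off 0, len 2} followed by {off 3, len 1} fails the off != br_startoff
 * check (logical hole at block 2) and returns 0.
 */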
/*
 * Used for get_buf, read_buf, read_bufr, and reada_buf.
 */
STATIC int
xfs_da_do_buf(
	xfs_trans_t	*trans,
	xfs_inode_t	*dp,
	xfs_dablk_t	bno,
	xfs_daddr_t	*mappedbnop,
	xfs_dabuf_t	**bpp,
	int		whichfork,
	int		caller)
{
	xfs_buf_t	*bp = NULL;
	xfs_buf_t	**bplist;
	int		error=0;
	int		i;
	xfs_bmbt_irec_t	map;
	xfs_bmbt_irec_t	*mapp;
	xfs_daddr_t	mappedbno;
	xfs_mount_t	*mp;
	int		nbplist=0;
	int		nfsb;
	int		nmap;
	xfs_dabuf_t	*rbp;

	mp = dp->i_mount;
	nfsb = (whichfork == XFS_DATA_FORK) ? mp->m_dirblkfsbs : 1;
	mappedbno = *mappedbnop;
	/*
	 * Caller doesn't have a mapping.  -2 means don't complain
	 * if we land in a hole.
	 */
	if (mappedbno == -1 || mappedbno == -2) {
		/*
		 * Optimize the one-block case.
		 */
		if (nfsb == 1)
			mapp = &map;
		else
			mapp = kmem_alloc(sizeof(*mapp) * nfsb, KM_SLEEP);

		nmap = nfsb;
		error = xfs_bmapi_read(dp, (xfs_fileoff_t)bno, nfsb, mapp,
				       &nmap, xfs_bmapi_aflag(whichfork));
		if (error)
			goto exit0;
	} else {
		map.br_startblock = XFS_DADDR_TO_FSB(mp, mappedbno);
		map.br_startoff = (xfs_fileoff_t)bno;
		map.br_blockcount = nfsb;
		mapp = &map;
		nmap = 1;
	}
	if (!xfs_da_map_covers_blocks(nmap, mapp, bno, nfsb)) {
		error = mappedbno == -2 ? 0 : XFS_ERROR(EFSCORRUPTED);
		if (unlikely(error == EFSCORRUPTED)) {
			if (xfs_error_level >= XFS_ERRLEVEL_LOW) {
				xfs_alert(mp, "%s: bno %lld dir: inode %lld",
					__func__, (long long)bno,
					(long long)dp->i_ino);
				for (i = 0; i < nmap; i++) {
					xfs_alert(mp,
"[%02d] br_startoff %lld br_startblock %lld br_blockcount %lld br_state %d",
						i,
						(long long)mapp[i].br_startoff,
						(long long)mapp[i].br_startblock,
						(long long)mapp[i].br_blockcount,
						mapp[i].br_state);
				}
			}
			XFS_ERROR_REPORT("xfs_da_do_buf(1)",
					 XFS_ERRLEVEL_LOW, mp);
		}
		goto exit0;
	}
	if (caller != 3 && nmap > 1) {
		bplist = kmem_alloc(sizeof(*bplist) * nmap, KM_SLEEP);
		nbplist = 0;
	} else
		bplist = NULL;
	/*
	 * Turn the mapping(s) into buffer(s).
	 */
	for (i = 0; i < nmap; i++) {
		int	nmapped;

		mappedbno = XFS_FSB_TO_DADDR(mp, mapp[i].br_startblock);
		if (i == 0)
			*mappedbnop = mappedbno;
		nmapped = (int)XFS_FSB_TO_BB(mp, mapp[i].br_blockcount);
		switch (caller) {
		case 0:
			bp = xfs_trans_get_buf(trans, mp->m_ddev_targp,
				mappedbno, nmapped, 0);
			error = bp ? bp->b_error : XFS_ERROR(EIO);
			break;
		case 1:
		case 2:
			error = xfs_trans_read_buf(mp, trans, mp->m_ddev_targp,
				mappedbno, nmapped, 0, &bp);
			break;
		case 3:
			xfs_buf_readahead(mp->m_ddev_targp, mappedbno, nmapped);
			error = 0;
			bp = NULL;
			break;
		}
		if (error) {
			if (bp)
				xfs_trans_brelse(trans, bp);
			goto exit1;
		}
		if (!bp)
			continue;
		if (caller == 1) {
			if (whichfork == XFS_ATTR_FORK)
				xfs_buf_set_ref(bp, XFS_ATTR_BTREE_REF);
			else
				xfs_buf_set_ref(bp, XFS_DIR_BTREE_REF);
		}
		if (bplist) {
			bplist[nbplist++] = bp;
		}
	}
	/*
	 * Build a dabuf structure.
	 */
	if (bplist) {
		rbp = xfs_da_buf_make(nbplist, bplist);
	} else if (bp)
		rbp = xfs_da_buf_make(1, &bp);
	else
		rbp = NULL;
	/*
	 * For read_buf, check the magic number.
	 */
	if (caller == 1) {
		xfs_dir2_data_hdr_t	*hdr = rbp->data;
		xfs_dir2_free_t		*free = rbp->data;
		xfs_da_blkinfo_t	*info = rbp->data;
		uint			magic, magic1;

		magic = be16_to_cpu(info->magic);
		magic1 = be32_to_cpu(hdr->magic);
		if (unlikely(
		    XFS_TEST_ERROR((magic != XFS_DA_NODE_MAGIC) &&
				   (magic != XFS_ATTR_LEAF_MAGIC) &&
				   (magic != XFS_DIR2_LEAF1_MAGIC) &&
				   (magic != XFS_DIR2_LEAFN_MAGIC) &&
				   (magic1 != XFS_DIR2_BLOCK_MAGIC) &&
				   (magic1 != XFS_DIR2_DATA_MAGIC) &&
				   (free->hdr.magic != cpu_to_be32(XFS_DIR2_FREE_MAGIC)),
				mp, XFS_ERRTAG_DA_READ_BUF,
				XFS_RANDOM_DA_READ_BUF))) {
			trace_xfs_da_btree_corrupt(rbp->bps[0], _RET_IP_);
			XFS_CORRUPTION_ERROR("xfs_da_do_buf(2)",
					     XFS_ERRLEVEL_LOW, mp, info);
			error = XFS_ERROR(EFSCORRUPTED);
			xfs_da_brelse(trans, rbp);
			nbplist = 0;
			goto exit1;
		}
	}
	if (bplist)
		kmem_free(bplist);
	if (mapp != &map)
		kmem_free(mapp);
	if (bpp)
		*bpp = rbp;
	return 0;
exit1:
	if (bplist) {
		for (i = 0; i < nbplist; i++)
			xfs_trans_brelse(trans, bplist[i]);
		kmem_free(bplist);
	}
exit0:
	if (mapp != &map)
		kmem_free(mapp);
	if (bpp)
		*bpp = NULL;
	return error;
}
/*
 * Get a buffer for the dir/attr block.
 */
int
xfs_da_get_buf(
	xfs_trans_t	*trans,
	xfs_inode_t	*dp,
	xfs_dablk_t	bno,
	xfs_daddr_t	mappedbno,
	xfs_dabuf_t	**bpp,
	int		whichfork)
{
	return xfs_da_do_buf(trans, dp, bno, &mappedbno, bpp, whichfork, 0);
}

/*
 * Get a buffer for the dir/attr block, fill in the contents.
 */
int
xfs_da_read_buf(
	xfs_trans_t	*trans,
	xfs_inode_t	*dp,
	xfs_dablk_t	bno,
	xfs_daddr_t	mappedbno,
	xfs_dabuf_t	**bpp,
	int		whichfork)
{
	return xfs_da_do_buf(trans, dp, bno, &mappedbno, bpp, whichfork, 1);
}

/*
 * Readahead the dir/attr block.
 */
xfs_daddr_t
xfs_da_reada_buf(
	xfs_trans_t	*trans,
	xfs_inode_t	*dp,
	xfs_dablk_t	bno,
	int		whichfork)
{
	xfs_daddr_t	rval;

	rval = -1;
	if (xfs_da_do_buf(trans, dp, bno, &rval, NULL, whichfork, 3))
		return -1;
	else
		return rval;
}
kmem_zone_t *xfs_da_state_zone;	/* anchor for state struct zone */
kmem_zone_t *xfs_dabuf_zone;		/* dabuf zone */

/*
 * Allocate a dir-state structure.
 * We don't put them on the stack since they're large.
 */
xfs_da_state_t *
xfs_da_state_alloc(void)
{
	return kmem_zone_zalloc(xfs_da_state_zone, KM_NOFS);
}
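
/*
 * Typical caller pattern (editorial sketch; the real callers live in the
 * attr and dir2 node code, and error handling plus the remaining state
 * fields are elided here):
 */
#if 0
	xfs_da_state_t	*state;
	int		retval;

	state = xfs_da_state_alloc();
	state->args = args;			/* filled-in xfs_da_args_t */
	state->mp = args->dp->i_mount;
	error = xfs_da_node_lookup_int(state, &retval);
	/* ... act on retval, e.g. xfs_da_split() or xfs_da_join() ... */
	xfs_da_state_free(state);
#endif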
/*
 * Kill the altpath contents of a da-state structure.
 */
STATIC void
xfs_da_state_kill_altpath(xfs_da_state_t *state)
{
	int	i;

	for (i = 0; i < state->altpath.active; i++) {
		if (state->altpath.blk[i].bp) {
			if (state->altpath.blk[i].bp != state->path.blk[i].bp)
				xfs_da_buf_done(state->altpath.blk[i].bp);
			state->altpath.blk[i].bp = NULL;
		}
	}
	state->altpath.active = 0;
}
/*
 * Free a da-state structure.
 */
void
xfs_da_state_free(xfs_da_state_t *state)
{
	int	i;

	xfs_da_state_kill_altpath(state);
	for (i = 0; i < state->path.active; i++) {
		if (state->path.blk[i].bp)
			xfs_da_buf_done(state->path.blk[i].bp);
	}
	if (state->extravalid && state->extrablk.bp)
		xfs_da_buf_done(state->extrablk.bp);
#ifdef DEBUG
	memset((char *)state, 0, sizeof(*state));
#endif /* DEBUG */
	kmem_zone_free(xfs_da_state_zone, state);
}
/*
 * Create a dabuf.
 */
STATIC xfs_dabuf_t *
xfs_da_buf_make(int nbuf, xfs_buf_t **bps)
{
	xfs_buf_t	*bp;
	xfs_dabuf_t	*dabuf;
	int		i;
	int		off;

	if (nbuf == 1)
		dabuf = kmem_zone_alloc(xfs_dabuf_zone, KM_NOFS);
	else
		dabuf = kmem_alloc(XFS_DA_BUF_SIZE(nbuf), KM_NOFS);
	dabuf->dirty = 0;
	if (nbuf == 1) {
		dabuf->nbuf = 1;
		bp = bps[0];
		dabuf->bbcount = bp->b_length;
		dabuf->data = bp->b_addr;
		dabuf->bps[0] = bp;
	} else {
		dabuf->nbuf = nbuf;
		for (i = 0, dabuf->bbcount = 0; i < nbuf; i++) {
			dabuf->bps[i] = bp = bps[i];
			dabuf->bbcount += bp->b_length;
		}
		dabuf->data = kmem_alloc(BBTOB(dabuf->bbcount), KM_SLEEP);
		for (i = off = 0; i < nbuf; i++, off += BBTOB(bp->b_length)) {
			bp = bps[i];
			memcpy((char *)dabuf->data + off, bp->b_addr,
				BBTOB(bp->b_length));
		}
	}
	return dabuf;
}
/*
 * Un-dirty a dabuf.
 */
STATIC void
xfs_da_buf_clean(xfs_dabuf_t *dabuf)
{
	xfs_buf_t	*bp;
	int		i;
	int		off;

	if (dabuf->dirty) {
		ASSERT(dabuf->nbuf > 1);
		dabuf->dirty = 0;
		for (i = off = 0; i < dabuf->nbuf;
				i++, off += BBTOB(bp->b_length)) {
			bp = dabuf->bps[i];
			memcpy(bp->b_addr, dabuf->data + off,
						BBTOB(bp->b_length));
		}
	}
}
/*
 * Release a dabuf.
 */
void
xfs_da_buf_done(xfs_dabuf_t *dabuf)
{
	ASSERT(dabuf);
	ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
	if (dabuf->dirty)
		xfs_da_buf_clean(dabuf);
	if (dabuf->nbuf > 1) {
		kmem_free(dabuf->data);
		kmem_free(dabuf);
	} else {
		kmem_zone_free(xfs_dabuf_zone, dabuf);
	}
}
/*
 * Log transaction from a dabuf.
 */
void
xfs_da_log_buf(xfs_trans_t *tp, xfs_dabuf_t *dabuf, uint first, uint last)
{
	xfs_buf_t	*bp;
	uint		f;
	int		i;
	uint		l;
	int		off;

	ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
	if (dabuf->nbuf == 1) {
		ASSERT(dabuf->data == dabuf->bps[0]->b_addr);
		xfs_trans_log_buf(tp, dabuf->bps[0], first, last);
		return;
	}
	dabuf->dirty = 1;
	ASSERT(first <= last);
	for (i = off = 0; i < dabuf->nbuf; i++, off += BBTOB(bp->b_length)) {
		bp = dabuf->bps[i];
		f = off;
		l = f + BBTOB(bp->b_length) - 1;
		if (f < first)
			f = first;
		if (l > last)
			l = last;
		if (f <= l)
			xfs_trans_log_buf(tp, bp, f - off, l - off);
		/*
		 * B_DONE is set by xfs_trans_log buf.
		 * If we don't set it on a new buffer (get not read)
		 * then if we don't put anything in the buffer it won't
		 * be set, and at commit it it released into the cache,
		 * and then a read will fail.
		 */
		else if (!(XFS_BUF_ISDONE(bp)))
			XFS_BUF_DONE(bp);
	}
	ASSERT(last < off);
}
/*
 * Release dabuf from a transaction.
 * Have to free up the dabuf before the buffers are released,
 * since the synchronization on the dabuf is really the lock on the buffer.
 */
void
xfs_da_brelse(xfs_trans_t *tp, xfs_dabuf_t *dabuf)
{
	xfs_buf_t	*bp;
	xfs_buf_t	**bplist;
	int		i;
	int		nbuf;

	ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
	if ((nbuf = dabuf->nbuf) == 1) {
		bplist = &bp;
		bp = dabuf->bps[0];
	} else {
		bplist = kmem_alloc(nbuf * sizeof(*bplist), KM_SLEEP);
		memcpy(bplist, dabuf->bps, nbuf * sizeof(*bplist));
	}
	xfs_da_buf_done(dabuf);
	for (i = 0; i < nbuf; i++)
		xfs_trans_brelse(tp, bplist[i]);
	if (bplist != &bp)
		kmem_free(bplist);
}
/*
 * Invalidate dabuf from a transaction.
 */
void
xfs_da_binval(xfs_trans_t *tp, xfs_dabuf_t *dabuf)
{
	xfs_buf_t	*bp;
	xfs_buf_t	**bplist;
	int		i;
	int		nbuf;

	ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
	if ((nbuf = dabuf->nbuf) == 1) {
		bplist = &bp;
		bp = dabuf->bps[0];
	} else {
		bplist = kmem_alloc(nbuf * sizeof(*bplist), KM_SLEEP);
		memcpy(bplist, dabuf->bps, nbuf * sizeof(*bplist));
	}
	xfs_da_buf_done(dabuf);
	for (i = 0; i < nbuf; i++)
		xfs_trans_binval(tp, bplist[i]);
	if (bplist != &bp)
		kmem_free(bplist);
}
/*
 * Get the first daddr from a dabuf.
 */
xfs_daddr_t
xfs_da_blkno(xfs_dabuf_t *dabuf)
{
	ASSERT(dabuf->nbuf);
	ASSERT(dabuf->data);
	return XFS_BUF_ADDR(dabuf->bps[0]);
}