/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_dir2.h"
#include "xfs_dir2_format.h"
#include "xfs_dir2_priv.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_attr.h"
#include "xfs_attr_leaf.h"
#include "xfs_error.h"
#include "xfs_trace.h"

/*
 * xfs_da_btree.c
 *
 * Routines to implement directories as Btrees of hashed names.
 */

/*========================================================================
 * Function prototypes for the kernel.
 *========================================================================*/

/*
 * Routines used for growing the Btree.
 */
STATIC int xfs_da_root_split(xfs_da_state_t *state,
				xfs_da_state_blk_t *existing_root,
				xfs_da_state_blk_t *new_child);
STATIC int xfs_da_node_split(xfs_da_state_t *state,
				xfs_da_state_blk_t *existing_blk,
				xfs_da_state_blk_t *split_blk,
				xfs_da_state_blk_t *blk_to_add,
				int treelevel,
				int *result);
STATIC void xfs_da_node_rebalance(xfs_da_state_t *state,
				xfs_da_state_blk_t *node_blk_1,
				xfs_da_state_blk_t *node_blk_2);
STATIC void xfs_da_node_add(xfs_da_state_t *state,
				xfs_da_state_blk_t *old_node_blk,
				xfs_da_state_blk_t *new_node_blk);

/*
 * Routines used for shrinking the Btree.
 */
STATIC int xfs_da_root_join(xfs_da_state_t *state,
				xfs_da_state_blk_t *root_blk);
STATIC int xfs_da_node_toosmall(xfs_da_state_t *state, int *retval);
STATIC void xfs_da_node_remove(xfs_da_state_t *state,
				xfs_da_state_blk_t *drop_blk);
STATIC void xfs_da_node_unbalance(xfs_da_state_t *state,
				xfs_da_state_blk_t *src_node_blk,
				xfs_da_state_blk_t *dst_node_blk);

/*
 * Utility routines.
 */
STATIC uint	xfs_da_node_lasthash(xfs_dabuf_t *bp, int *count);
STATIC int	xfs_da_node_order(xfs_dabuf_t *node1_bp, xfs_dabuf_t *node2_bp);
STATIC xfs_dabuf_t *xfs_da_buf_make(int nbuf, xfs_buf_t **bps);
STATIC int	xfs_da_blk_unlink(xfs_da_state_t *state,
				  xfs_da_state_blk_t *drop_blk,
				  xfs_da_state_blk_t *save_blk);
STATIC void	xfs_da_state_kill_altpath(xfs_da_state_t *state);

/*========================================================================
 * Routines used for growing the Btree.
 *========================================================================*/

/*
 * Create the initial contents of an intermediate node.
 */
int
xfs_da_node_create(xfs_da_args_t *args, xfs_dablk_t blkno, int level,
				 xfs_dabuf_t **bpp, int whichfork)
{
	xfs_da_intnode_t *node;
	xfs_dabuf_t *bp;
	int error;
	xfs_trans_t *tp;

	tp = args->trans;
	error = xfs_da_get_buf(tp, args->dp, blkno, -1, &bp, whichfork);
	if (error)
		return(error);
	ASSERT(bp != NULL);
	node = bp->data;
	node->hdr.info.forw = 0;
	node->hdr.info.back = 0;
	node->hdr.info.magic = cpu_to_be16(XFS_DA_NODE_MAGIC);
	node->hdr.info.pad = 0;
	node->hdr.count = 0;
	node->hdr.level = cpu_to_be16(level);
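	/*
	 * XFS_DA_LOGRANGE converts the header's offset and size within
	 * the block into the byte range that must be logged, so only the
	 * header is dirtied here; the entries are logged by the callers
	 * that fill them in.
	 */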
	xfs_da_log_buf(tp, bp,
		XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));

	*bpp = bp;
	return(0);
}

/*
 * Split a leaf node, rebalance, then possibly split
 * intermediate nodes, rebalance, etc.
 */
int							/* error */
xfs_da_split(xfs_da_state_t *state)
{
	xfs_da_state_blk_t *oldblk, *newblk, *addblk;
	xfs_da_intnode_t *node;
	xfs_dabuf_t *bp;
	int max, action, error, i;

	/*
	 * Walk back up the tree splitting/inserting/adjusting as necessary.
	 * If we need to insert and there isn't room, split the node, then
	 * decide which fragment to insert the new block from below into.
	 * Note that we may split the root this way, but we need more fixup.
	 */
	max = state->path.active - 1;
	ASSERT((max >= 0) && (max < XFS_DA_NODE_MAXDEPTH));
	ASSERT(state->path.blk[max].magic == XFS_ATTR_LEAF_MAGIC ||
	       state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC);

	addblk = &state->path.blk[max];		/* initial dummy value */
	for (i = max; (i >= 0) && addblk; state->path.active--, i--) {
		oldblk = &state->path.blk[i];
		newblk = &state->altpath.blk[i];

		/*
		 * If a leaf node then
		 *     Allocate a new leaf node, then rebalance across them.
		 * else if an intermediate node then
		 *     We split on the last layer, must we split the node?
		 */
		switch (oldblk->magic) {
		case XFS_ATTR_LEAF_MAGIC:
			error = xfs_attr_leaf_split(state, oldblk, newblk);
			if ((error != 0) && (error != ENOSPC)) {
				return(error);	/* GROT: attr is inconsistent */
			}
			if (!error) {
				addblk = newblk;
				break;
			}
			/*
			 * Entry wouldn't fit, split the leaf again.
			 */
			state->extravalid = 1;
			if (state->inleaf) {
				state->extraafter = 0;	/* before newblk */
				error = xfs_attr_leaf_split(state, oldblk,
							    &state->extrablk);
			} else {
				state->extraafter = 1;	/* after newblk */
				error = xfs_attr_leaf_split(state, newblk,
							    &state->extrablk);
			}
			if (error)
				return(error);	/* GROT: attr inconsistent */
			addblk = newblk;
			break;
		case XFS_DIR2_LEAFN_MAGIC:
			error = xfs_dir2_leafn_split(state, oldblk, newblk);
			if (error)
				return error;
			addblk = newblk;
			break;
		case XFS_DA_NODE_MAGIC:
			error = xfs_da_node_split(state, oldblk, newblk, addblk,
							 max - i, &action);
			xfs_da_buf_done(addblk->bp);
			addblk->bp = NULL;
			if (error)
				return(error);	/* GROT: dir is inconsistent */
			/*
			 * Record the newly split block for the next time thru?
			 */
			if (action)
				addblk = newblk;
			else
				addblk = NULL;
			break;
		}

		/*
		 * Update the btree to show the new hashval for this child.
		 */
		xfs_da_fixhashpath(state, &state->path);
		/*
		 * If we won't need this block again, it's getting dropped
		 * from the active path by the loop control, so we need
		 * to mark it done now.
		 */
		if (i > 0 || !addblk)
			xfs_da_buf_done(oldblk->bp);
	}
	if (!addblk)
		return(0);

	/*
	 * Split the root node.
	 */
	ASSERT(state->path.active == 0);
	oldblk = &state->path.blk[0];
	error = xfs_da_root_split(state, oldblk, addblk);
	if (error) {
		xfs_da_buf_done(oldblk->bp);
		xfs_da_buf_done(addblk->bp);
		addblk->bp = NULL;
		return(error);	/* GROT: dir is inconsistent */
	}

	/*
	 * Update pointers to the node which used to be block 0 and
	 * just got bumped because of the addition of a new root node.
	 * There might be three blocks involved if a double split occurred,
	 * and the original block 0 could be at any position in the list.
	 */
	node = oldblk->bp->data;
	if (node->hdr.info.forw) {
		if (be32_to_cpu(node->hdr.info.forw) == addblk->blkno) {
			bp = addblk->bp;
		} else {
			ASSERT(state->extravalid);
			bp = state->extrablk.bp;
		}
		node = bp->data;
		node->hdr.info.back = cpu_to_be32(oldblk->blkno);
		xfs_da_log_buf(state->args->trans, bp,
		    XFS_DA_LOGRANGE(node, &node->hdr.info,
		    sizeof(node->hdr.info)));
	}
	node = oldblk->bp->data;
	if (node->hdr.info.back) {
		if (be32_to_cpu(node->hdr.info.back) == addblk->blkno) {
			bp = addblk->bp;
		} else {
			ASSERT(state->extravalid);
			bp = state->extrablk.bp;
		}
		node = bp->data;
		node->hdr.info.forw = cpu_to_be32(oldblk->blkno);
		xfs_da_log_buf(state->args->trans, bp,
		    XFS_DA_LOGRANGE(node, &node->hdr.info,
		    sizeof(node->hdr.info)));
	}
	xfs_da_buf_done(oldblk->bp);
	xfs_da_buf_done(addblk->bp);
	addblk->bp = NULL;
	return(0);
}

/*
 * Split the root.  We have to create a new root and point to the two
 * parts (the split old root) that we just created.  Copy block zero to
 * the EOF, extending the inode in process.
 */
STATIC int						/* error */
xfs_da_root_split(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
				 xfs_da_state_blk_t *blk2)
{
	xfs_da_intnode_t *node, *oldroot;
	xfs_da_args_t *args;
	xfs_dablk_t blkno;
	xfs_dabuf_t *bp;
	int error, size;
	xfs_inode_t *dp;
	xfs_trans_t *tp;
	xfs_mount_t *mp;
	xfs_dir2_leaf_t *leaf;

	/*
	 * Copy the existing (incorrect) block from the root node position
	 * to a free space somewhere.
	 */
	args = state->args;
	ASSERT(args != NULL);
	error = xfs_da_grow_inode(args, &blkno);
	if (error)
		return(error);
	dp = args->dp;
	tp = args->trans;
	mp = state->mp;
	error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, args->whichfork);
	if (error)
		return(error);
	ASSERT(bp != NULL);
	node = bp->data;
	oldroot = blk1->bp->data;
	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)) {
		size = (int)((char *)&oldroot->btree[be16_to_cpu(oldroot->hdr.count)] -
			     (char *)oldroot);
	} else {
		ASSERT(oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
		leaf = (xfs_dir2_leaf_t *)oldroot;
		size = (int)((char *)&leaf->ents[be16_to_cpu(leaf->hdr.count)] -
			     (char *)leaf);
	}
	memcpy(node, oldroot, size);
	xfs_da_log_buf(tp, bp, 0, size - 1);
	xfs_da_buf_done(blk1->bp);
	blk1->bp = bp;
	blk1->blkno = blkno;

	/*
	 * Set up the new root node.
	 */
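	/*
	 * The root must stay at a fixed offset: m_dirleafblk for a
	 * directory, block 0 for an attribute fork.  That is why the old
	 * root's contents were copied out above instead of moving the
	 * block itself.
	 */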
	error = xfs_da_node_create(args,
		(args->whichfork == XFS_DATA_FORK) ? mp->m_dirleafblk : 0,
		be16_to_cpu(node->hdr.level) + 1, &bp, args->whichfork);
	if (error)
		return(error);
	node = bp->data;
	node->btree[0].hashval = cpu_to_be32(blk1->hashval);
	node->btree[0].before = cpu_to_be32(blk1->blkno);
	node->btree[1].hashval = cpu_to_be32(blk2->hashval);
	node->btree[1].before = cpu_to_be32(blk2->blkno);
	node->hdr.count = cpu_to_be16(2);

#ifdef DEBUG
	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)) {
		ASSERT(blk1->blkno >= mp->m_dirleafblk &&
		       blk1->blkno < mp->m_dirfreeblk);
		ASSERT(blk2->blkno >= mp->m_dirleafblk &&
		       blk2->blkno < mp->m_dirfreeblk);
	}
#endif

	/* Header is already logged by xfs_da_node_create */
	xfs_da_log_buf(tp, bp,
		XFS_DA_LOGRANGE(node, node->btree,
			sizeof(xfs_da_node_entry_t) * 2));
	xfs_da_buf_done(bp);

	return(0);
}

/*
 * Split the node, rebalance, then add the new entry.
 */
STATIC int						/* error */
xfs_da_node_split(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
				 xfs_da_state_blk_t *newblk,
				 xfs_da_state_blk_t *addblk,
				 int treelevel, int *result)
{
	xfs_da_intnode_t *node;
	xfs_dablk_t blkno;
	int newcount, error;
	int useextra;

	node = oldblk->bp->data;
	ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));

	/*
	 * With V2 dirs the extra block is data or freespace.
	 */
	useextra = state->extravalid && state->args->whichfork == XFS_ATTR_FORK;
	newcount = 1 + useextra;
	/*
	 * Do we have to split the node?
	 */
	if ((be16_to_cpu(node->hdr.count) + newcount) > state->node_ents) {
		/*
		 * Allocate a new node, add to the doubly linked chain of
		 * nodes, then move some of our excess entries into it.
		 */
		error = xfs_da_grow_inode(state->args, &blkno);
		if (error)
			return(error);	/* GROT: dir is inconsistent */

		error = xfs_da_node_create(state->args, blkno, treelevel,
					   &newblk->bp, state->args->whichfork);
		if (error)
			return(error);	/* GROT: dir is inconsistent */
		newblk->blkno = blkno;
		newblk->magic = XFS_DA_NODE_MAGIC;
		xfs_da_node_rebalance(state, oldblk, newblk);
		error = xfs_da_blk_link(state, oldblk, newblk);
		if (error)
			return(error);
		*result = 1;
	} else {
		*result = 0;
	}

	/*
	 * Insert the new entry(s) into the correct block
	 * (updating last hashval in the process).
	 *
	 * xfs_da_node_add() inserts BEFORE the given index,
	 * and as a result of using node_lookup_int() we always
	 * point to a valid entry (not after one), but a split
	 * operation always results in a new block whose hashvals
	 * FOLLOW the current block.
	 *
	 * If we had double-split op below us, then add the extra block too.
	 */
	node = oldblk->bp->data;
	if (oldblk->index <= be16_to_cpu(node->hdr.count)) {
		oldblk->index++;
		xfs_da_node_add(state, oldblk, addblk);
		if (useextra) {
			if (state->extraafter)
				oldblk->index++;
			xfs_da_node_add(state, oldblk, &state->extrablk);
			state->extravalid = 0;
		}
	} else {
		newblk->index++;
		xfs_da_node_add(state, newblk, addblk);
		if (useextra) {
			if (state->extraafter)
				newblk->index++;
			xfs_da_node_add(state, newblk, &state->extrablk);
			state->extravalid = 0;
		}
	}

	return(0);
}

/*
 * Balance the btree elements between two intermediate nodes,
 * usually one full and one empty.
 *
 * NOTE: if blk2 is empty, then it will get the upper half of blk1.
 */
STATIC void
xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
				     xfs_da_state_blk_t *blk2)
{
	xfs_da_intnode_t *node1, *node2, *tmpnode;
	xfs_da_node_entry_t *btree_s, *btree_d;
	int count, tmp;
	xfs_trans_t *tp;

	node1 = blk1->bp->data;
	node2 = blk2->bp->data;
	/*
	 * Figure out how many entries need to move, and in which direction.
	 * Swap the nodes around if that makes it simpler.
	 */
	if ((be16_to_cpu(node1->hdr.count) > 0) && (be16_to_cpu(node2->hdr.count) > 0) &&
	    ((be32_to_cpu(node2->btree[0].hashval) < be32_to_cpu(node1->btree[0].hashval)) ||
	     (be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval) <
	      be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval)))) {
		tmpnode = node1;
		node1 = node2;
		node2 = tmpnode;
	}
	ASSERT(node1->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	ASSERT(node2->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	count = (be16_to_cpu(node1->hdr.count) - be16_to_cpu(node2->hdr.count)) / 2;
	if (count == 0)
		return;
	tp = state->args->trans;
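	/*
	 * After the swap above node1 always holds the lower hashvals, so a
	 * positive count moves entries from node1 into node2, and a
	 * negative count pulls entries back from node2 into node1.
	 */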
	/*
	 * Two cases: high-to-low and low-to-high.
	 */
	if (count > 0) {
		/*
		 * Move elements in node2 up to make a hole.
		 */
		if ((tmp = be16_to_cpu(node2->hdr.count)) > 0) {
			tmp *= (uint)sizeof(xfs_da_node_entry_t);
			btree_s = &node2->btree[0];
			btree_d = &node2->btree[count];
			memmove(btree_d, btree_s, tmp);
		}

		/*
		 * Move the req'd B-tree elements from high in node1 to
		 * low in node2.
		 */
		be16_add_cpu(&node2->hdr.count, count);
		tmp = count * (uint)sizeof(xfs_da_node_entry_t);
		btree_s = &node1->btree[be16_to_cpu(node1->hdr.count) - count];
		btree_d = &node2->btree[0];
		memcpy(btree_d, btree_s, tmp);
		be16_add_cpu(&node1->hdr.count, -count);
	} else {
		/*
		 * Move the req'd B-tree elements from low in node2 to
		 * high in node1.
		 */
		count = -count;
		tmp = count * (uint)sizeof(xfs_da_node_entry_t);
		btree_s = &node2->btree[0];
		btree_d = &node1->btree[be16_to_cpu(node1->hdr.count)];
		memcpy(btree_d, btree_s, tmp);
		be16_add_cpu(&node1->hdr.count, count);
		xfs_da_log_buf(tp, blk1->bp,
			XFS_DA_LOGRANGE(node1, btree_d, tmp));

		/*
		 * Move elements in node2 down to fill the hole.
		 */
		tmp = be16_to_cpu(node2->hdr.count) - count;
		tmp *= (uint)sizeof(xfs_da_node_entry_t);
		btree_s = &node2->btree[count];
		btree_d = &node2->btree[0];
		memmove(btree_d, btree_s, tmp);
		be16_add_cpu(&node2->hdr.count, -count);
	}

	/*
	 * Log header of node 1 and all current bits of node 2.
	 */
	xfs_da_log_buf(tp, blk1->bp,
		XFS_DA_LOGRANGE(node1, &node1->hdr, sizeof(node1->hdr)));
	xfs_da_log_buf(tp, blk2->bp,
		XFS_DA_LOGRANGE(node2, &node2->hdr,
			sizeof(node2->hdr) +
			sizeof(node2->btree[0]) * be16_to_cpu(node2->hdr.count)));

	/*
	 * Record the last hashval from each block for upward propagation.
	 * (note: don't use the swapped node pointers)
	 */
	node1 = blk1->bp->data;
	node2 = blk2->bp->data;
	blk1->hashval = be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval);
	blk2->hashval = be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval);

	/*
	 * Adjust the expected index for insertion.
	 */
	if (blk1->index >= be16_to_cpu(node1->hdr.count)) {
		blk2->index = blk1->index - be16_to_cpu(node1->hdr.count);
		blk1->index = be16_to_cpu(node1->hdr.count) + 1;	/* make it invalid */
	}
}

/*
 * Add a new entry to an intermediate node.
 */
STATIC void
xfs_da_node_add(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
			       xfs_da_state_blk_t *newblk)
{
	xfs_da_intnode_t *node;
	xfs_da_node_entry_t *btree;
	int tmp;

	node = oldblk->bp->data;
	ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	ASSERT((oldblk->index >= 0) && (oldblk->index <= be16_to_cpu(node->hdr.count)));
	ASSERT(newblk->blkno != 0);
	if (state->args->whichfork == XFS_DATA_FORK)
		ASSERT(newblk->blkno >= state->mp->m_dirleafblk &&
		       newblk->blkno < state->mp->m_dirfreeblk);

	/*
	 * We may need to make some room before we insert the new node.
	 */
	tmp = 0;
	btree = &node->btree[ oldblk->index ];
	if (oldblk->index < be16_to_cpu(node->hdr.count)) {
		tmp = (be16_to_cpu(node->hdr.count) - oldblk->index) * (uint)sizeof(*btree);
		memmove(btree + 1, btree, tmp);
	}
	btree->hashval = cpu_to_be32(newblk->hashval);
	btree->before = cpu_to_be32(newblk->blkno);
	xfs_da_log_buf(state->args->trans, oldblk->bp,
		XFS_DA_LOGRANGE(node, btree, tmp + sizeof(*btree)));
	be16_add_cpu(&node->hdr.count, 1);
	xfs_da_log_buf(state->args->trans, oldblk->bp,
		XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));

	/*
	 * Copy the last hash value from the oldblk to propagate upwards.
	 */
	oldblk->hashval = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1 ].hashval);
}

/*========================================================================
 * Routines used for shrinking the Btree.
 *========================================================================*/

/*
 * Deallocate an empty leaf node, remove it from its parent,
 * possibly deallocating that block, etc...
 */
int
xfs_da_join(xfs_da_state_t *state)
{
	xfs_da_state_blk_t *drop_blk, *save_blk;
	int action, error;

	action = 0;
	drop_blk = &state->path.blk[ state->path.active-1 ];
	save_blk = &state->altpath.blk[ state->path.active-1 ];
	ASSERT(state->path.blk[0].magic == XFS_DA_NODE_MAGIC);
	ASSERT(drop_blk->magic == XFS_ATTR_LEAF_MAGIC ||
	       drop_blk->magic == XFS_DIR2_LEAFN_MAGIC);

	/*
	 * Walk back up the tree joining/deallocating as necessary.
	 * When we stop dropping blocks, break out.
	 */
	for (  ; state->path.active >= 2; drop_blk--, save_blk--,
		 state->path.active--) {
		/*
		 * See if we can combine the block with a neighbor.
		 *   (action == 0) => no options, just leave
		 *   (action == 1) => coalesce, then unlink
		 *   (action == 2) => block empty, unlink it
		 */
		switch (drop_blk->magic) {
		case XFS_ATTR_LEAF_MAGIC:
			error = xfs_attr_leaf_toosmall(state, &action);
			if (error)
				return(error);
			if (action == 0)
				return(0);
			xfs_attr_leaf_unbalance(state, drop_blk, save_blk);
			break;
		case XFS_DIR2_LEAFN_MAGIC:
			error = xfs_dir2_leafn_toosmall(state, &action);
			if (error)
				return error;
			if (action == 0)
				return 0;
			xfs_dir2_leafn_unbalance(state, drop_blk, save_blk);
			break;
		case XFS_DA_NODE_MAGIC:
			/*
			 * Remove the offending node, fixup hashvals,
			 * check for a toosmall neighbor.
			 */
			xfs_da_node_remove(state, drop_blk);
			xfs_da_fixhashpath(state, &state->path);
			error = xfs_da_node_toosmall(state, &action);
			if (error)
				return(error);
			if (action == 0)
				return 0;
			xfs_da_node_unbalance(state, drop_blk, save_blk);
			break;
		}
		xfs_da_fixhashpath(state, &state->altpath);
		error = xfs_da_blk_unlink(state, drop_blk, save_blk);
		xfs_da_state_kill_altpath(state);
		if (error)
			return(error);
		error = xfs_da_shrink_inode(state->args, drop_blk->blkno,
							 drop_blk->bp);
		drop_blk->bp = NULL;
		if (error)
			return(error);
	}
	/*
	 * We joined all the way to the top.  If it turns out that
	 * we only have one entry in the root, make the child block
	 * the new root.
	 */
	xfs_da_node_remove(state, drop_blk);
	xfs_da_fixhashpath(state, &state->path);
	error = xfs_da_root_join(state, &state->path.blk[0]);
	return(error);
}

#ifdef	DEBUG
static void
xfs_da_blkinfo_onlychild_validate(struct xfs_da_blkinfo *blkinfo, __u16 level)
{
	__be16	magic = blkinfo->magic;

	if (level == 1) {
		ASSERT(magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
		       magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
	} else
		ASSERT(magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	ASSERT(!blkinfo->forw);
	ASSERT(!blkinfo->back);
}
#else	/* !DEBUG */
#define	xfs_da_blkinfo_onlychild_validate(blkinfo, level)
#endif	/* !DEBUG */

/*
 * We have only one entry in the root.  Copy the only remaining child of
 * the old root to block 0 as the new root node.
 */
STATIC int
xfs_da_root_join(xfs_da_state_t *state, xfs_da_state_blk_t *root_blk)
{
	xfs_da_intnode_t *oldroot;
	xfs_da_args_t *args;
	xfs_dablk_t child;
	xfs_dabuf_t *bp;
	int error;

	args = state->args;
	ASSERT(args != NULL);
	ASSERT(root_blk->magic == XFS_DA_NODE_MAGIC);
	oldroot = root_blk->bp->data;
	ASSERT(oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	ASSERT(!oldroot->hdr.info.forw);
	ASSERT(!oldroot->hdr.info.back);

	/*
	 * If the root has more than one child, then don't do anything.
	 */
	if (be16_to_cpu(oldroot->hdr.count) > 1)
		return(0);

	/*
	 * Read in the (only) child block, then copy those bytes into
	 * the root block's buffer and free the original child block.
	 */
	child = be32_to_cpu(oldroot->btree[0].before);
	ASSERT(child != 0);
	error = xfs_da_read_buf(args->trans, args->dp, child, -1, &bp,
					     args->whichfork);
	if (error)
		return(error);
	ASSERT(bp != NULL);
	xfs_da_blkinfo_onlychild_validate(bp->data,
					be16_to_cpu(oldroot->hdr.level));

	memcpy(root_blk->bp->data, bp->data, state->blocksize);
	xfs_da_log_buf(args->trans, root_blk->bp, 0, state->blocksize - 1);
	error = xfs_da_shrink_inode(args, child, bp);
	return(error);
}

/*
 * Check a node block and its neighbors to see if the block should be
 * collapsed into one or the other neighbor.  Always keep the block
 * with the smaller block number.
 * If the current block is over 50% full, don't try to join it, return 0.
 * If the block is empty, fill in the state structure and return 2.
 * If it can be collapsed, fill in the state structure and return 1.
 * If nothing can be done, return 0.
 */
STATIC int
xfs_da_node_toosmall(xfs_da_state_t *state, int *action)
{
	xfs_da_intnode_t *node;
	xfs_da_state_blk_t *blk;
	xfs_da_blkinfo_t *info;
	int count, forward, error, retval, i;
	xfs_dablk_t blkno;
	xfs_dabuf_t *bp;

	/*
	 * Check for the degenerate case of the block being over 50% full.
	 * If so, it's not worth even looking to see if we might be able
	 * to coalesce with a sibling.
	 */
	blk = &state->path.blk[ state->path.active-1 ];
	info = blk->bp->data;
	ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	node = (xfs_da_intnode_t *)info;
	count = be16_to_cpu(node->hdr.count);
	if (count > (state->node_ents >> 1)) {
		*action = 0;	/* blk over 50%, don't try to join */
		return(0);	/* blk over 50%, don't try to join */
	}

	/*
	 * Check for the degenerate case of the block being empty.
	 * If the block is empty, we'll simply delete it, no need to
	 * coalesce it with a sibling block.  We choose (arbitrarily)
	 * to merge with the forward block unless it is NULL.
	 */
	if (count == 0) {
		/*
		 * Make altpath point to the block we want to keep and
		 * path point to the block we want to drop (this one).
		 */
		forward = (info->forw != 0);
		memcpy(&state->altpath, &state->path, sizeof(state->path));
		error = xfs_da_path_shift(state, &state->altpath, forward,
						 0, &retval);
		if (error)
			return(error);
		if (retval) {
			*action = 0;
		} else {
			*action = 2;
		}
		return(0);
	}

	/*
	 * Examine each sibling block to see if we can coalesce with
	 * at least 25% free space to spare.  We need to figure out
	 * whether to merge with the forward or the backward block.
	 * We prefer coalescing with the lower numbered sibling so as
	 * to shrink a directory over time.
	 */
	/* start with smaller blk num */
	forward = (be32_to_cpu(info->forw) < be32_to_cpu(info->back));
	for (i = 0; i < 2; forward = !forward, i++) {
		if (forward)
			blkno = be32_to_cpu(info->forw);
		else
			blkno = be32_to_cpu(info->back);
		if (blkno == 0)
			continue;
		error = xfs_da_read_buf(state->args->trans, state->args->dp,
					blkno, -1, &bp, state->args->whichfork);
		if (error)
			return(error);
		ASSERT(bp != NULL);
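		/*
		 * The arithmetic below allows the merge only if the combined
		 * entry count fits in 75% of a node (node_ents minus one
		 * quarter), i.e. the result keeps at least 25% free space.
		 */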
		node = (xfs_da_intnode_t *)info;
		count = state->node_ents;
		count -= state->node_ents >> 2;
		count -= be16_to_cpu(node->hdr.count);
		node = bp->data;
		ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
		count -= be16_to_cpu(node->hdr.count);
		xfs_da_brelse(state->args->trans, bp);
		if (count >= 0)
			break;	/* fits with at least 25% to spare */
	}
	if (i >= 2) {
		*action = 0;
		return(0);
	}

	/*
	 * Make altpath point to the block we want to keep (the lower
	 * numbered block) and path point to the block we want to drop.
	 */
	memcpy(&state->altpath, &state->path, sizeof(state->path));
	if (blkno < blk->blkno) {
		error = xfs_da_path_shift(state, &state->altpath, forward,
						 0, &retval);
		if (error) {
			return(error);
		}
		if (retval) {
			*action = 0;
			return(0);
		}
	} else {
		error = xfs_da_path_shift(state, &state->path, forward,
						 0, &retval);
		if (error) {
			return(error);
		}
		if (retval) {
			*action = 0;
			return(0);
		}
	}
	*action = 1;
	return(0);
}

/*
 * Walk back up the tree adjusting hash values as necessary,
 * when we stop making changes, return.
 */
void
xfs_da_fixhashpath(xfs_da_state_t *state, xfs_da_state_path_t *path)
{
	xfs_da_state_blk_t *blk;
	xfs_da_intnode_t *node;
	xfs_da_node_entry_t *btree;
	xfs_dahash_t lasthash=0;
	int level, count;

	level = path->active-1;
	blk = &path->blk[ level ];
	switch (blk->magic) {
	case XFS_ATTR_LEAF_MAGIC:
		lasthash = xfs_attr_leaf_lasthash(blk->bp, &count);
		if (count == 0)
			return;
		break;
	case XFS_DIR2_LEAFN_MAGIC:
		lasthash = xfs_dir2_leafn_lasthash(blk->bp, &count);
		if (count == 0)
			return;
		break;
	case XFS_DA_NODE_MAGIC:
		lasthash = xfs_da_node_lasthash(blk->bp, &count);
		if (count == 0)
			return;
		break;
	}
	for (blk--, level--; level >= 0; blk--, level--) {
		node = blk->bp->data;
		ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
		btree = &node->btree[ blk->index ];
		if (be32_to_cpu(btree->hashval) == lasthash)
			break;
		blk->hashval = lasthash;
		btree->hashval = cpu_to_be32(lasthash);
		xfs_da_log_buf(state->args->trans, blk->bp,
			XFS_DA_LOGRANGE(node, btree, sizeof(*btree)));

		lasthash = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval);
	}
}

/*
 * Remove an entry from an intermediate node.
 */
STATIC void
xfs_da_node_remove(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk)
{
	xfs_da_intnode_t *node;
	xfs_da_node_entry_t *btree;
	int tmp;

	node = drop_blk->bp->data;
	ASSERT(drop_blk->index < be16_to_cpu(node->hdr.count));
	ASSERT(drop_blk->index >= 0);

	/*
	 * Copy over the offending entry, or just zero it out.
	 */
	btree = &node->btree[drop_blk->index];
	if (drop_blk->index < (be16_to_cpu(node->hdr.count)-1)) {
		tmp = be16_to_cpu(node->hdr.count) - drop_blk->index - 1;
		tmp *= (uint)sizeof(xfs_da_node_entry_t);
		memmove(btree, btree + 1, tmp);
		xfs_da_log_buf(state->args->trans, drop_blk->bp,
		    XFS_DA_LOGRANGE(node, btree, tmp));
		btree = &node->btree[be16_to_cpu(node->hdr.count)-1];
	}
	memset((char *)btree, 0, sizeof(xfs_da_node_entry_t));
	xfs_da_log_buf(state->args->trans, drop_blk->bp,
	    XFS_DA_LOGRANGE(node, btree, sizeof(*btree)));
	be16_add_cpu(&node->hdr.count, -1);
	xfs_da_log_buf(state->args->trans, drop_blk->bp,
	    XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));

	/*
	 * Copy the last hash value from the block to propagate upwards.
	 */
	btree--;
	drop_blk->hashval = be32_to_cpu(btree->hashval);
}

/*
 * Unbalance the btree elements between two intermediate nodes,
 * move all Btree elements from one node into another.
 */
STATIC void
xfs_da_node_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
				     xfs_da_state_blk_t *save_blk)
{
	xfs_da_intnode_t *drop_node, *save_node;
	xfs_da_node_entry_t *btree;
	int tmp;
	xfs_trans_t *tp;

	drop_node = drop_blk->bp->data;
	save_node = save_blk->bp->data;
	ASSERT(drop_node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	ASSERT(save_node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	tp = state->args->trans;

	/*
	 * If the dying block has lower hashvals, then move all the
	 * elements in the remaining block up to make a hole.
	 */
	if ((be32_to_cpu(drop_node->btree[0].hashval) < be32_to_cpu(save_node->btree[ 0 ].hashval)) ||
	    (be32_to_cpu(drop_node->btree[be16_to_cpu(drop_node->hdr.count)-1].hashval) <
	     be32_to_cpu(save_node->btree[be16_to_cpu(save_node->hdr.count)-1].hashval)))
	{
		btree = &save_node->btree[be16_to_cpu(drop_node->hdr.count)];
		tmp = be16_to_cpu(save_node->hdr.count) * (uint)sizeof(xfs_da_node_entry_t);
		memmove(btree, &save_node->btree[0], tmp);
		btree = &save_node->btree[0];
		xfs_da_log_buf(tp, save_blk->bp,
			XFS_DA_LOGRANGE(save_node, btree,
				(be16_to_cpu(save_node->hdr.count) + be16_to_cpu(drop_node->hdr.count)) *
				sizeof(xfs_da_node_entry_t)));
	} else {
		btree = &save_node->btree[be16_to_cpu(save_node->hdr.count)];
		xfs_da_log_buf(tp, save_blk->bp,
			XFS_DA_LOGRANGE(save_node, btree,
				be16_to_cpu(drop_node->hdr.count) *
				sizeof(xfs_da_node_entry_t)));
	}

	/*
	 * Move all the B-tree elements from drop_blk to save_blk.
	 */
	tmp = be16_to_cpu(drop_node->hdr.count) * (uint)sizeof(xfs_da_node_entry_t);
	memcpy(btree, &drop_node->btree[0], tmp);
	be16_add_cpu(&save_node->hdr.count, be16_to_cpu(drop_node->hdr.count));

	xfs_da_log_buf(tp, save_blk->bp,
		XFS_DA_LOGRANGE(save_node, &save_node->hdr,
			sizeof(save_node->hdr)));

	/*
	 * Save the last hashval in the remaining block for upward propagation.
	 */
	save_blk->hashval = be32_to_cpu(save_node->btree[be16_to_cpu(save_node->hdr.count)-1].hashval);
}

/*========================================================================
 * Routines used for finding things in the Btree.
 *========================================================================*/

/*
 * Walk down the Btree looking for a particular filename, filling
 * in the state structure as we go.
 *
 * We will set the state structure to point to each of the elements
 * in each of the nodes where either the hashval is or should be.
 *
 * We support duplicate hashval's so for each entry in the current
 * node that could contain the desired hashval, descend.  This is a
 * pruned depth-first tree search.
 */
int							/* error */
xfs_da_node_lookup_int(xfs_da_state_t *state, int *result)
{
	xfs_da_state_blk_t *blk;
	xfs_da_blkinfo_t *curr;
	xfs_da_intnode_t *node;
	xfs_da_node_entry_t *btree;
	xfs_dablk_t blkno;
	int probe, span, max, error, retval;
	xfs_dahash_t hashval, btreehashval;
	xfs_da_args_t *args;

	args = state->args;

	/*
	 * Descend thru the B-tree searching each level for the right
	 * node to use, until the right hashval is found.
	 */
	blkno = (args->whichfork == XFS_DATA_FORK)? state->mp->m_dirleafblk : 0;
	for (blk = &state->path.blk[0], state->path.active = 1;
		 state->path.active <= XFS_DA_NODE_MAXDEPTH;
		 blk++, state->path.active++) {
		/*
		 * Read the next node down in the tree.
		 */
		blk->blkno = blkno;
		error = xfs_da_read_buf(args->trans, args->dp, blkno,
					-1, &blk->bp, args->whichfork);
		if (error) {
			blk->blkno = 0;
			state->path.active--;
			return(error);
		}
		curr = blk->bp->data;
		blk->magic = be16_to_cpu(curr->magic);
		ASSERT(blk->magic == XFS_DA_NODE_MAGIC ||
		       blk->magic == XFS_DIR2_LEAFN_MAGIC ||
		       blk->magic == XFS_ATTR_LEAF_MAGIC);

		/*
		 * Search an intermediate node for a match.
		 */
		if (blk->magic == XFS_DA_NODE_MAGIC) {
			node = blk->bp->data;
			max = be16_to_cpu(node->hdr.count);
			blk->hashval = be32_to_cpu(node->btree[max-1].hashval);

			/*
			 * Binary search.  (note: small blocks will skip loop)
			 */
			probe = span = max / 2;
			hashval = args->hashval;
			for (btree = &node->btree[probe]; span > 4;
				   btree = &node->btree[probe]) {
				span /= 2;
				btreehashval = be32_to_cpu(btree->hashval);
				if (btreehashval < hashval)
					probe += span;
				else if (btreehashval > hashval)
					probe -= span;
				else
					break;
			}
			ASSERT((probe >= 0) && (probe < max));
			ASSERT((span <= 4) || (be32_to_cpu(btree->hashval) == hashval));
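			/*
			 * The binary search stops once span <= 4 (or on an
			 * exact hit), so probe is within a few entries of
			 * the target; the linear scans below finish the job.
			 */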
			/*
			 * Since we may have duplicate hashval's, find the first
			 * matching hashval in the node.
			 */
			while ((probe > 0) && (be32_to_cpu(btree->hashval) >= hashval)) {
				btree--;
				probe--;
			}
			while ((probe < max) && (be32_to_cpu(btree->hashval) < hashval)) {
				btree++;
				probe++;
			}

			/*
			 * Pick the right block to descend on.
			 */
			if (probe == max) {
				blk->index = max-1;
				blkno = be32_to_cpu(node->btree[max-1].before);
			} else {
				blk->index = probe;
				blkno = be32_to_cpu(btree->before);
			}
		} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
			blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
			break;
		} else if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
			blk->hashval = xfs_dir2_leafn_lasthash(blk->bp, NULL);
			break;
		}
	}

	/*
	 * A leaf block that ends in the hashval that we are interested in
	 * (final hashval == search hashval) means that the next block may
	 * contain more entries with the same hashval, shift upward to the
	 * next leaf and keep searching.
	 */
	for (;;) {
		if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
			retval = xfs_dir2_leafn_lookup_int(blk->bp, args,
							&blk->index, state);
		} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
			retval = xfs_attr_leaf_lookup_int(blk->bp, args);
			blk->index = args->index;
			args->blkno = blk->blkno;
		} else {
			ASSERT(0);
			return XFS_ERROR(EFSCORRUPTED);
		}
		if (((retval == ENOENT) || (retval == ENOATTR)) &&
		    (blk->hashval == args->hashval)) {
			error = xfs_da_path_shift(state, &state->path, 1, 1,
							 &retval);
			if (error)
				return(error);
			if (retval == 0) {
				continue;
			} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
				/* path_shift() gives ENOENT */
				retval = XFS_ERROR(ENOATTR);
			}
		}
		break;
	}
	*result = retval;
	return(0);
}

/*========================================================================
 * Utility routines.
 *========================================================================*/

/*
 * Link a new block into a doubly linked list of blocks (of whatever type).
 */
int							/* error */
xfs_da_blk_link(xfs_da_state_t *state, xfs_da_state_blk_t *old_blk,
			       xfs_da_state_blk_t *new_blk)
{
	xfs_da_blkinfo_t *old_info, *new_info, *tmp_info;
	xfs_da_args_t *args;
	int before=0, error;
	xfs_dabuf_t *bp;

	/*
	 * Set up environment.
	 */
	args = state->args;
	ASSERT(args != NULL);
	old_info = old_blk->bp->data;
	new_info = new_blk->bp->data;
	ASSERT(old_blk->magic == XFS_DA_NODE_MAGIC ||
	       old_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
	       old_blk->magic == XFS_ATTR_LEAF_MAGIC);
	ASSERT(old_blk->magic == be16_to_cpu(old_info->magic));
	ASSERT(new_blk->magic == be16_to_cpu(new_info->magic));
	ASSERT(old_blk->magic == new_blk->magic);

	switch (old_blk->magic) {
	case XFS_ATTR_LEAF_MAGIC:
		before = xfs_attr_leaf_order(old_blk->bp, new_blk->bp);
		break;
	case XFS_DIR2_LEAFN_MAGIC:
		before = xfs_dir2_leafn_order(old_blk->bp, new_blk->bp);
		break;
	case XFS_DA_NODE_MAGIC:
		before = xfs_da_node_order(old_blk->bp, new_blk->bp);
		break;
	}

	/*
	 * Link blocks in appropriate order.
	 */
	if (before) {
		/*
		 * Link new block in before existing block.
		 */
		new_info->forw = cpu_to_be32(old_blk->blkno);
		new_info->back = old_info->back;
		if (old_info->back) {
			error = xfs_da_read_buf(args->trans, args->dp,
						be32_to_cpu(old_info->back),
						-1, &bp, args->whichfork);
			if (error)
				return(error);
			ASSERT(bp != NULL);
			tmp_info = bp->data;
			ASSERT(be16_to_cpu(tmp_info->magic) == be16_to_cpu(old_info->magic));
			ASSERT(be32_to_cpu(tmp_info->forw) == old_blk->blkno);
			tmp_info->forw = cpu_to_be32(new_blk->blkno);
			xfs_da_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
			xfs_da_buf_done(bp);
		}
		old_info->back = cpu_to_be32(new_blk->blkno);
	} else {
		/*
		 * Link new block in after existing block.
		 */
		new_info->forw = old_info->forw;
		new_info->back = cpu_to_be32(old_blk->blkno);
		if (old_info->forw) {
			error = xfs_da_read_buf(args->trans, args->dp,
						be32_to_cpu(old_info->forw),
						-1, &bp, args->whichfork);
			if (error)
				return(error);
			ASSERT(bp != NULL);
			tmp_info = bp->data;
			ASSERT(tmp_info->magic == old_info->magic);
			ASSERT(be32_to_cpu(tmp_info->back) == old_blk->blkno);
			tmp_info->back = cpu_to_be32(new_blk->blkno);
			xfs_da_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
			xfs_da_buf_done(bp);
		}
		old_info->forw = cpu_to_be32(new_blk->blkno);
	}

	xfs_da_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1);
	xfs_da_log_buf(args->trans, new_blk->bp, 0, sizeof(*tmp_info) - 1);
	return(0);
}

/*
 * Compare two intermediate nodes for "order".
 */
STATIC int
xfs_da_node_order(xfs_dabuf_t *node1_bp, xfs_dabuf_t *node2_bp)
{
	xfs_da_intnode_t *node1, *node2;

	node1 = node1_bp->data;
	node2 = node2_bp->data;
	ASSERT(node1->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC) &&
	       node2->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	if ((be16_to_cpu(node1->hdr.count) > 0) && (be16_to_cpu(node2->hdr.count) > 0) &&
	    ((be32_to_cpu(node2->btree[0].hashval) <
	      be32_to_cpu(node1->btree[0].hashval)) ||
	     (be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval) <
	      be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval)))) {
		return(1);
	}
	return(0);
}

/*
 * Pick up the last hashvalue from an intermediate node.
 */
STATIC uint
xfs_da_node_lasthash(xfs_dabuf_t *bp, int *count)
{
	xfs_da_intnode_t *node;

	node = bp->data;
	ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	if (count)
		*count = be16_to_cpu(node->hdr.count);
	if (!node->hdr.count)
		return(0);
	return be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval);
}

/*
 * Unlink a block from a doubly linked list of blocks.
 */
STATIC int						/* error */
xfs_da_blk_unlink(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
				 xfs_da_state_blk_t *save_blk)
{
	xfs_da_blkinfo_t *drop_info, *save_info, *tmp_info;
	xfs_da_args_t *args;
	xfs_dabuf_t *bp;
	int error;

	/*
	 * Set up environment.
	 */
	args = state->args;
	ASSERT(args != NULL);
	save_info = save_blk->bp->data;
	drop_info = drop_blk->bp->data;
	ASSERT(save_blk->magic == XFS_DA_NODE_MAGIC ||
	       save_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
	       save_blk->magic == XFS_ATTR_LEAF_MAGIC);
	ASSERT(save_blk->magic == be16_to_cpu(save_info->magic));
	ASSERT(drop_blk->magic == be16_to_cpu(drop_info->magic));
	ASSERT(save_blk->magic == drop_blk->magic);
	ASSERT((be32_to_cpu(save_info->forw) == drop_blk->blkno) ||
	       (be32_to_cpu(save_info->back) == drop_blk->blkno));
	ASSERT((be32_to_cpu(drop_info->forw) == save_blk->blkno) ||
	       (be32_to_cpu(drop_info->back) == save_blk->blkno));

	/*
	 * Unlink the leaf block from the doubly linked chain of leaves.
	 */
	if (be32_to_cpu(save_info->back) == drop_blk->blkno) {
		save_info->back = drop_info->back;
		if (drop_info->back) {
			error = xfs_da_read_buf(args->trans, args->dp,
						be32_to_cpu(drop_info->back),
						-1, &bp, args->whichfork);
			if (error)
				return(error);
			ASSERT(bp != NULL);
			tmp_info = bp->data;
			ASSERT(tmp_info->magic == save_info->magic);
			ASSERT(be32_to_cpu(tmp_info->forw) == drop_blk->blkno);
			tmp_info->forw = cpu_to_be32(save_blk->blkno);
			xfs_da_log_buf(args->trans, bp, 0,
						    sizeof(*tmp_info) - 1);
			xfs_da_buf_done(bp);
		}
	} else {
		save_info->forw = drop_info->forw;
		if (drop_info->forw) {
			error = xfs_da_read_buf(args->trans, args->dp,
						be32_to_cpu(drop_info->forw),
						-1, &bp, args->whichfork);
			if (error)
				return(error);
			ASSERT(bp != NULL);
			tmp_info = bp->data;
			ASSERT(tmp_info->magic == save_info->magic);
			ASSERT(be32_to_cpu(tmp_info->back) == drop_blk->blkno);
			tmp_info->back = cpu_to_be32(save_blk->blkno);
			xfs_da_log_buf(args->trans, bp, 0,
						    sizeof(*tmp_info) - 1);
			xfs_da_buf_done(bp);
		}
	}

	xfs_da_log_buf(args->trans, save_blk->bp, 0, sizeof(*save_info) - 1);
	return(0);
}

/*
 * Move a path "forward" or "!forward" one block at the current level.
 *
 * This routine will adjust a "path" to point to the next block
 * "forward" (higher hashvalues) or "!forward" (lower hashvals) in the
 * Btree, including updating pointers to the intermediate nodes between
 * the new bottom and the root.
 */
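/*
 * For example, xfs_da_node_lookup_int() above shifts forward by one
 * leaf when a leaf ends exactly on the search hashval, since entries
 * with equal hashvals may continue in the next sibling block.
 */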
int							/* error */
xfs_da_path_shift(xfs_da_state_t *state, xfs_da_state_path_t *path,
				 int forward, int release, int *result)
{
	xfs_da_state_blk_t *blk;
	xfs_da_blkinfo_t *info;
	xfs_da_intnode_t *node;
	xfs_da_args_t *args;
	xfs_dablk_t blkno=0;
	int level, error;

	/*
	 * Roll up the Btree looking for the first block where our
	 * current index is not at the edge of the block.  Note that
	 * we skip the bottom layer because we want the sibling block.
	 */
	args = state->args;
	ASSERT(args != NULL);
	ASSERT(path != NULL);
	ASSERT((path->active > 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
	level = (path->active-1) - 1;	/* skip bottom layer in path */
	for (blk = &path->blk[level]; level >= 0; blk--, level--) {
		ASSERT(blk->bp != NULL);
		node = blk->bp->data;
		ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
		if (forward && (blk->index < be16_to_cpu(node->hdr.count)-1)) {
			blk->index++;
			blkno = be32_to_cpu(node->btree[blk->index].before);
			break;
		} else if (!forward && (blk->index > 0)) {
			blk->index--;
			blkno = be32_to_cpu(node->btree[blk->index].before);
			break;
		}
	}
	if (level < 0) {
		*result = XFS_ERROR(ENOENT);	/* we're out of our tree */
		ASSERT(args->op_flags & XFS_DA_OP_OKNOENT);
		return(0);
	}

	/*
	 * Roll down the edge of the subtree until we reach the
	 * same depth we were at originally.
	 */
	for (blk++, level++; level < path->active; blk++, level++) {
		/*
		 * Release the old block.
		 * (if it's dirty, trans won't actually let go)
		 */
		if (release)
			xfs_da_brelse(args->trans, blk->bp);

		/*
		 * Read the next child block.
		 */
		blk->blkno = blkno;
		error = xfs_da_read_buf(args->trans, args->dp, blkno, -1,
					&blk->bp, args->whichfork);
		if (error)
			return(error);
		ASSERT(blk->bp != NULL);
		info = blk->bp->data;
		ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
		blk->magic = be16_to_cpu(info->magic);
		if (blk->magic == XFS_DA_NODE_MAGIC) {
			node = (xfs_da_intnode_t *)info;
			blk->hashval = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval);
			if (forward)
				blk->index = 0;
			else
				blk->index = be16_to_cpu(node->hdr.count)-1;
			blkno = be32_to_cpu(node->btree[blk->index].before);
		} else {
			ASSERT(level == path->active-1);
			blk->index = 0;
			switch(blk->magic) {
			case XFS_ATTR_LEAF_MAGIC:
				blk->hashval = xfs_attr_leaf_lasthash(blk->bp,
								      NULL);
				break;
			case XFS_DIR2_LEAFN_MAGIC:
				blk->hashval = xfs_dir2_leafn_lasthash(blk->bp,
								       NULL);
				break;
			default:
				ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC ||
				       blk->magic == XFS_DIR2_LEAFN_MAGIC);
				break;
			}
		}
	}
	*result = 0;
	return(0);
}


/*========================================================================
 * Utility routines.
 *========================================================================*/

/*
 * Implement a simple hash on a character string.
 * Rotate the hash value by 7 bits, then XOR each character in.
 * This is implemented with some source-level loop unrolling.
 */
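/*
 * A worked example (illustration only, not part of the original
 * source): hashing the two-byte name "ab" takes the namelen == 2 case
 * directly, giving ('a' << 7) ^ ('b' << 0) ^ rol32(0, 14) == 0x30e2.
 * Longer names first fold each four-byte group into the rolled hash.
 */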
xfs_dahash_t
xfs_da_hashname(const __uint8_t *name, int namelen)
{
	xfs_dahash_t hash;

	/*
	 * Do four characters at a time as long as we can.
	 */
	for (hash = 0; namelen >= 4; namelen -= 4, name += 4)
		hash = (name[0] << 21) ^ (name[1] << 14) ^ (name[2] << 7) ^
		       (name[3] << 0) ^ rol32(hash, 7 * 4);

	/*
	 * Now do the rest of the characters.
	 */
	switch (namelen) {
	case 3:
		return (name[0] << 14) ^ (name[1] << 7) ^ (name[2] << 0) ^
		       rol32(hash, 7 * 3);
	case 2:
		return (name[0] << 7) ^ (name[1] << 0) ^ rol32(hash, 7 * 2);
	case 1:
		return (name[0] << 0) ^ rol32(hash, 7 * 1);
	default: /* case 0: */
		return hash;
	}
}

enum xfs_dacmp
xfs_da_compname(
	struct xfs_da_args *args,
	const unsigned char *name,
	int len)
{
	return (args->namelen == len && memcmp(args->name, name, len) == 0) ?
					XFS_CMP_EXACT : XFS_CMP_DIFFERENT;
}

static xfs_dahash_t
xfs_default_hashname(
	struct xfs_name *name)
{
	return xfs_da_hashname(name->name, name->len);
}

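/*
 * The default name operations implement an exact, case-sensitive
 * match; filesystems that want different semantics (e.g. the
 * case-insensitive directory variants) install their own xfs_nameops
 * table instead.
 */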
const struct xfs_nameops xfs_default_nameops = {
	.hashname	= xfs_default_hashname,
	.compname	= xfs_da_compname
};

int
xfs_da_grow_inode_int(
	struct xfs_da_args	*args,
	xfs_fileoff_t		*bno,
	int			count)
{
	struct xfs_trans	*tp = args->trans;
	struct xfs_inode	*dp = args->dp;
	int			w = args->whichfork;
	xfs_drfsbno_t		nblks = dp->i_d.di_nblocks;
	struct xfs_bmbt_irec	map, *mapp;
	int			nmap, error, got, i, mapi;

	/*
	 * Find a spot in the file space to put the new block.
	 */
	error = xfs_bmap_first_unused(tp, dp, count, bno, w);
	if (error)
		return error;

	/*
	 * Try mapping it in one filesystem block.
	 */
	nmap = 1;
	ASSERT(args->firstblock != NULL);
	error = xfs_bmapi(tp, dp, *bno, count,
			xfs_bmapi_aflag(w)|XFS_BMAPI_WRITE|XFS_BMAPI_METADATA|
			XFS_BMAPI_CONTIG,
			args->firstblock, args->total, &map, &nmap,
			args->flist);
	if (error)
		return error;

	ASSERT(nmap <= 1);
	if (nmap == 1) {
		mapp = &map;
		mapi = 1;
	} else if (nmap == 0 && count > 1) {
		xfs_fileoff_t	b;
		int		c;

		/*
		 * If we didn't get it and the block might work if fragmented,
		 * try without the CONTIG flag.  Loop until we get it all.
		 */
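		/*
		 * Each xfs_bmapi() pass below maps at most XFS_BMAP_MAX_NMAP
		 * extents; mapi accumulates the running total and b advances
		 * past the last extent returned, so the loop ends when the
		 * whole range is covered or no progress can be made.
		 */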
		mapp = kmem_alloc(sizeof(*mapp) * count, KM_SLEEP);
		for (b = *bno, mapi = 0; b < *bno + count; ) {
			nmap = MIN(XFS_BMAP_MAX_NMAP, count);
			c = (int)(*bno + count - b);
			error = xfs_bmapi(tp, dp, b, c,
					xfs_bmapi_aflag(w)|XFS_BMAPI_WRITE|
					XFS_BMAPI_METADATA,
					args->firstblock, args->total,
					&mapp[mapi], &nmap, args->flist);
			if (error)
				goto out_free_map;
			if (nmap < 1)
				break;
			mapi += nmap;
			b = mapp[mapi - 1].br_startoff +
			    mapp[mapi - 1].br_blockcount;
		}
	} else {
		mapi = 0;
		mapp = NULL;
	}

	/*
	 * Count the blocks we got, make sure it matches the total.
	 */
	for (i = 0, got = 0; i < mapi; i++)
		got += mapp[i].br_blockcount;
	if (got != count || mapp[0].br_startoff != *bno ||
	    mapp[mapi - 1].br_startoff + mapp[mapi - 1].br_blockcount !=
	    *bno + count) {
		error = XFS_ERROR(ENOSPC);
		goto out_free_map;
	}

	/* account for newly allocated blocks in reserved blocks total */
	args->total -= dp->i_d.di_nblocks - nblks;

out_free_map:
	if (mapp != &map)
		kmem_free(mapp);
	return error;
}

/*
 * Add a block to the btree ahead of the file.
 * Return the new block number to the caller.
 */
int
xfs_da_grow_inode(
	struct xfs_da_args	*args,
	xfs_dablk_t		*new_blkno)
{
	xfs_fileoff_t		bno;
	int			count;
	int			error;

	if (args->whichfork == XFS_DATA_FORK) {
		bno = args->dp->i_mount->m_dirleafblk;
		count = args->dp->i_mount->m_dirblkfsbs;
	} else {
		bno = 0;
		count = 1;
	}

	error = xfs_da_grow_inode_int(args, &bno, count);
	if (!error)
		*new_blkno = (xfs_dablk_t)bno;
	return error;
}

/*
 * Ick.  We need to always be able to remove a btree block, even
 * if there's no space reservation because the filesystem is full.
 * This is called if xfs_bunmapi on a btree block fails due to ENOSPC.
 * It swaps the target block with the last block in the file.  The
 * last block in the file can always be removed since it can't cause
 * a bmap btree split to do that.
 */
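/*
 * The swap runs in three steps, marked by the comments below: copy the
 * last block's contents over the dead block, repair the sibling links
 * that still point at the old location, then walk down from the root
 * and repoint the parent entry at the dead block's address.
 */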
1679 STATIC int
1680 xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
1681 xfs_dabuf_t **dead_bufp)
1683 xfs_dablk_t dead_blkno, last_blkno, sib_blkno, par_blkno;
1684 xfs_dabuf_t *dead_buf, *last_buf, *sib_buf, *par_buf;
1685 xfs_fileoff_t lastoff;
1686 xfs_inode_t *ip;
1687 xfs_trans_t *tp;
1688 xfs_mount_t *mp;
1689 int error, w, entno, level, dead_level;
1690 xfs_da_blkinfo_t *dead_info, *sib_info;
1691 xfs_da_intnode_t *par_node, *dead_node;
1692 xfs_dir2_leaf_t *dead_leaf2;
1693 xfs_dahash_t dead_hash;
1695 dead_buf = *dead_bufp;
1696 dead_blkno = *dead_blknop;
1697 tp = args->trans;
1698 ip = args->dp;
1699 w = args->whichfork;
1700 ASSERT(w == XFS_DATA_FORK);
1701 mp = ip->i_mount;
1702 lastoff = mp->m_dirfreeblk;
1703 error = xfs_bmap_last_before(tp, ip, &lastoff, w);
1704 if (error)
1705 return error;
1706 if (unlikely(lastoff == 0)) {
1707 XFS_ERROR_REPORT("xfs_da_swap_lastblock(1)", XFS_ERRLEVEL_LOW,
1708 mp);
1709 return XFS_ERROR(EFSCORRUPTED);
1712 * Read the last block in the btree space.
1714 last_blkno = (xfs_dablk_t)lastoff - mp->m_dirblkfsbs;
1715 if ((error = xfs_da_read_buf(tp, ip, last_blkno, -1, &last_buf, w)))
1716 return error;
1718 * Copy the last block into the dead buffer and log it.
1720 memcpy(dead_buf->data, last_buf->data, mp->m_dirblksize);
1721 xfs_da_log_buf(tp, dead_buf, 0, mp->m_dirblksize - 1);
1722 dead_info = dead_buf->data;
1724 * Get values from the moved block.
1726 if (dead_info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)) {
1727 dead_leaf2 = (xfs_dir2_leaf_t *)dead_info;
1728 dead_level = 0;
1729 dead_hash = be32_to_cpu(dead_leaf2->ents[be16_to_cpu(dead_leaf2->hdr.count) - 1].hashval);
1730 } else {
1731 ASSERT(dead_info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
1732 dead_node = (xfs_da_intnode_t *)dead_info;
1733 dead_level = be16_to_cpu(dead_node->hdr.level);
1734 dead_hash = be32_to_cpu(dead_node->btree[be16_to_cpu(dead_node->hdr.count) - 1].hashval);
1736 sib_buf = par_buf = NULL;
1738 * If the moved block has a left sibling, fix up the pointers.
1740 if ((sib_blkno = be32_to_cpu(dead_info->back))) {
1741 if ((error = xfs_da_read_buf(tp, ip, sib_blkno, -1, &sib_buf, w)))
1742 goto done;
1743 sib_info = sib_buf->data;
1744 if (unlikely(
1745 be32_to_cpu(sib_info->forw) != last_blkno ||
1746 sib_info->magic != dead_info->magic)) {
1747 XFS_ERROR_REPORT("xfs_da_swap_lastblock(2)",
1748 XFS_ERRLEVEL_LOW, mp);
1749 error = XFS_ERROR(EFSCORRUPTED);
1750 goto done;
1752 sib_info->forw = cpu_to_be32(dead_blkno);
1753 xfs_da_log_buf(tp, sib_buf,
1754 XFS_DA_LOGRANGE(sib_info, &sib_info->forw,
1755 sizeof(sib_info->forw)));
1756 xfs_da_buf_done(sib_buf);
1757 sib_buf = NULL;
1760 * If the moved block has a right sibling, fix up the pointers.
1762 if ((sib_blkno = be32_to_cpu(dead_info->forw))) {
1763 if ((error = xfs_da_read_buf(tp, ip, sib_blkno, -1, &sib_buf, w)))
1764 goto done;
1765 sib_info = sib_buf->data;
1766 if (unlikely(
1767 be32_to_cpu(sib_info->back) != last_blkno ||
1768 sib_info->magic != dead_info->magic)) {
1769 XFS_ERROR_REPORT("xfs_da_swap_lastblock(3)",
1770 XFS_ERRLEVEL_LOW, mp);
1771 error = XFS_ERROR(EFSCORRUPTED);
1772 goto done;
1774 sib_info->back = cpu_to_be32(dead_blkno);
1775 xfs_da_log_buf(tp, sib_buf,
1776 XFS_DA_LOGRANGE(sib_info, &sib_info->back,
1777 sizeof(sib_info->back)));
1778 xfs_da_buf_done(sib_buf);
1779 sib_buf = NULL;
1781 par_blkno = mp->m_dirleafblk;
1782 level = -1;
1784 * Walk down the tree looking for the parent of the moved block.
1786 for (;;) {
1787 if ((error = xfs_da_read_buf(tp, ip, par_blkno, -1, &par_buf, w)))
1788 goto done;
1789 par_node = par_buf->data;
1790 if (unlikely(par_node->hdr.info.magic !=
1791 cpu_to_be16(XFS_DA_NODE_MAGIC) ||
1792 (level >= 0 && level != be16_to_cpu(par_node->hdr.level) + 1))) {
1793 XFS_ERROR_REPORT("xfs_da_swap_lastblock(4)",
1794 XFS_ERRLEVEL_LOW, mp);
1795 error = XFS_ERROR(EFSCORRUPTED);
1796 goto done;
1798 level = be16_to_cpu(par_node->hdr.level);
1799 for (entno = 0;
1800 entno < be16_to_cpu(par_node->hdr.count) &&
1801 be32_to_cpu(par_node->btree[entno].hashval) < dead_hash;
1802 entno++)
1803 continue;
1804 if (unlikely(entno == be16_to_cpu(par_node->hdr.count))) {
1805 XFS_ERROR_REPORT("xfs_da_swap_lastblock(5)",
1806 XFS_ERRLEVEL_LOW, mp);
1807 error = XFS_ERROR(EFSCORRUPTED);
1808 goto done;
1810 par_blkno = be32_to_cpu(par_node->btree[entno].before);
1811 if (level == dead_level + 1)
1812 break;
1813 xfs_da_brelse(tp, par_buf);
1814 par_buf = NULL;
1817 * We're in the right parent block.
1818 * Look for the right entry.
1820 for (;;) {
1821 for (;
1822 entno < be16_to_cpu(par_node->hdr.count) &&
1823 be32_to_cpu(par_node->btree[entno].before) != last_blkno;
1824 entno++)
1825 continue;
1826 if (entno < be16_to_cpu(par_node->hdr.count))
1827 break;
1828 par_blkno = be32_to_cpu(par_node->hdr.info.forw);
1829 xfs_da_brelse(tp, par_buf);
1830 par_buf = NULL;
1831 if (unlikely(par_blkno == 0)) {
1832 XFS_ERROR_REPORT("xfs_da_swap_lastblock(6)",
1833 XFS_ERRLEVEL_LOW, mp);
1834 error = XFS_ERROR(EFSCORRUPTED);
1835 goto done;
1837 if ((error = xfs_da_read_buf(tp, ip, par_blkno, -1, &par_buf, w)))
1838 goto done;
1839 par_node = par_buf->data;
1840 if (unlikely(
1841 be16_to_cpu(par_node->hdr.level) != level ||
1842 par_node->hdr.info.magic != cpu_to_be16(XFS_DA_NODE_MAGIC))) {
1843 XFS_ERROR_REPORT("xfs_da_swap_lastblock(7)",
1844 XFS_ERRLEVEL_LOW, mp);
1845 error = XFS_ERROR(EFSCORRUPTED);
1846 goto done;
1848 entno = 0;
1851 * Update the parent entry pointing to the moved block.
1853 par_node->btree[entno].before = cpu_to_be32(dead_blkno);
1854 xfs_da_log_buf(tp, par_buf,
1855 XFS_DA_LOGRANGE(par_node, &par_node->btree[entno].before,
1856 sizeof(par_node->btree[entno].before)));
1857 xfs_da_buf_done(par_buf);
1858 xfs_da_buf_done(dead_buf);
1859 *dead_blknop = last_blkno;
1860 *dead_bufp = last_buf;
1861 return 0;
1862 done:
1863 if (par_buf)
1864 xfs_da_brelse(tp, par_buf);
1865 if (sib_buf)
1866 xfs_da_brelse(tp, sib_buf);
1867 xfs_da_brelse(tp, last_buf);
1868 return error;

/*
 * Remove a btree block from a directory or attribute.
 */
int
xfs_da_shrink_inode(xfs_da_args_t *args, xfs_dablk_t dead_blkno,
		    xfs_dabuf_t *dead_buf)
{
	xfs_inode_t *dp;
	int done, error, w, count;
	xfs_trans_t *tp;
	xfs_mount_t *mp;

	dp = args->dp;
	w = args->whichfork;
	tp = args->trans;
	mp = dp->i_mount;
	if (w == XFS_DATA_FORK)
		count = mp->m_dirblkfsbs;
	else
		count = 1;
	for (;;) {
		/*
		 * Remove extents.  If we get ENOSPC for a dir we have to move
		 * the last block to the place we want to kill.
		 */
		if ((error = xfs_bunmapi(tp, dp, dead_blkno, count,
				xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
				0, args->firstblock, args->flist,
				&done)) == ENOSPC) {
			if (w != XFS_DATA_FORK)
				break;
			if ((error = xfs_da_swap_lastblock(args, &dead_blkno,
					&dead_buf)))
				break;
		} else {
			break;
		}
	}
	xfs_da_binval(tp, dead_buf);
	return error;
}
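
/*
 * Illustrative sketch, not part of the original file: the loop above is a
 * "retry after making space" pattern -- if unmapping a directory block
 * fails with ENOSPC, swap the tree's last block into the dead slot and
 * unmap again.  Hypothetical helpers unmap_block() and swap_last() stand
 * in for xfs_bunmapi() and xfs_da_swap_lastblock():
 */
#if 0
static int shrink_retry(int *blkno, int is_data_fork)
{
	int error;

	for (;;) {
		error = unmap_block(*blkno);
		if (error != ENOSPC)
			break;		/* success, or a hard error */
		if (!is_data_fork)
			break;		/* only dir (data fork) blocks move */
		error = swap_last(blkno);	/* retarget, then retry */
		if (error)
			break;
	}
	return error;
}
#endif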

/*
 * See if the mapping(s) for this btree block are valid, i.e.
 * don't contain holes, are logically contiguous, and cover the whole range.
 */
STATIC int
xfs_da_map_covers_blocks(
	int		nmap,
	xfs_bmbt_irec_t	*mapp,
	xfs_dablk_t	bno,
	int		count)
{
	int		i;
	xfs_fileoff_t	off;

	for (i = 0, off = bno; i < nmap; i++) {
		if (mapp[i].br_startblock == HOLESTARTBLOCK ||
		    mapp[i].br_startblock == DELAYSTARTBLOCK) {
			return 0;
		}
		if (off != mapp[i].br_startoff) {
			return 0;
		}
		off += mapp[i].br_blockcount;
	}
	return off == bno + count;
}
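
/*
 * Illustrative sketch, not part of the original file: the validity check
 * above rejects holes/delayed allocations and any gap in logical offsets,
 * then requires the extents to span exactly [bno, bno + count).  The same
 * logic over a hypothetical extent record:
 */
#if 0
struct ext { long long startoff; long long blockcount; int is_hole; };

static int covers(const struct ext *m, int nmap, long long bno, int count)
{
	long long off = bno;
	int i;

	for (i = 0; i < nmap; i++) {
		if (m[i].is_hole)
			return 0;	/* hole or delalloc: not usable */
		if (off != m[i].startoff)
			return 0;	/* logically discontiguous */
		off += m[i].blockcount;
	}
	return off == bno + count;	/* must cover the whole range */
}
#endif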

/*
 * Make a dabuf.
 * Used for get_buf, read_buf, read_bufr, and reada_buf.
 */
STATIC int
xfs_da_do_buf(
	xfs_trans_t	*trans,
	xfs_inode_t	*dp,
	xfs_dablk_t	bno,
	xfs_daddr_t	*mappedbnop,
	xfs_dabuf_t	**bpp,
	int		whichfork,
	int		caller)
{
	xfs_buf_t	*bp = NULL;
	xfs_buf_t	**bplist;
	int		error = 0;
	int		i;
	xfs_bmbt_irec_t	map;
	xfs_bmbt_irec_t	*mapp;
	xfs_daddr_t	mappedbno;
	xfs_mount_t	*mp;
	int		nbplist = 0;
	int		nfsb;
	int		nmap;
	xfs_dabuf_t	*rbp;

	mp = dp->i_mount;
	nfsb = (whichfork == XFS_DATA_FORK) ? mp->m_dirblkfsbs : 1;
	mappedbno = *mappedbnop;
	/*
	 * Caller doesn't have a mapping.  -2 means don't complain
	 * if we land in a hole.
	 */
	if (mappedbno == -1 || mappedbno == -2) {
		/*
		 * Optimize the one-block case.
		 */
		if (nfsb == 1) {
			xfs_fsblock_t	fsb;

			if ((error =
			    xfs_bmapi_single(trans, dp, whichfork, &fsb,
				    (xfs_fileoff_t)bno))) {
				return error;
			}
			mapp = &map;
			if (fsb == NULLFSBLOCK) {
				nmap = 0;
			} else {
				map.br_startblock = fsb;
				map.br_startoff = (xfs_fileoff_t)bno;
				map.br_blockcount = 1;
				nmap = 1;
			}
		} else {
			mapp = kmem_alloc(sizeof(*mapp) * nfsb, KM_SLEEP);
			nmap = nfsb;
			if ((error = xfs_bmapi(trans, dp, (xfs_fileoff_t)bno,
					nfsb,
					XFS_BMAPI_METADATA |
						xfs_bmapi_aflag(whichfork),
					NULL, 0, mapp, &nmap, NULL)))
				goto exit0;
		}
	} else {
		map.br_startblock = XFS_DADDR_TO_FSB(mp, mappedbno);
		map.br_startoff = (xfs_fileoff_t)bno;
		map.br_blockcount = nfsb;
		mapp = &map;
		nmap = 1;
	}
	if (!xfs_da_map_covers_blocks(nmap, mapp, bno, nfsb)) {
		error = mappedbno == -2 ? 0 : XFS_ERROR(EFSCORRUPTED);
		if (unlikely(error == EFSCORRUPTED)) {
			if (xfs_error_level >= XFS_ERRLEVEL_LOW) {
				xfs_alert(mp, "%s: bno %lld dir: inode %lld",
					__func__, (long long)bno,
					(long long)dp->i_ino);
				for (i = 0; i < nmap; i++) {
					xfs_alert(mp,
"[%02d] br_startoff %lld br_startblock %lld br_blockcount %lld br_state %d",
						i,
						(long long)mapp[i].br_startoff,
						(long long)mapp[i].br_startblock,
						(long long)mapp[i].br_blockcount,
						mapp[i].br_state);
				}
			}
			XFS_ERROR_REPORT("xfs_da_do_buf(1)",
					 XFS_ERRLEVEL_LOW, mp);
		}
		goto exit0;
	}
	if (caller != 3 && nmap > 1) {
		bplist = kmem_alloc(sizeof(*bplist) * nmap, KM_SLEEP);
		nbplist = 0;
	} else
		bplist = NULL;
	/*
	 * Turn the mapping(s) into buffer(s).
	 */
	for (i = 0; i < nmap; i++) {
		int	nmapped;

		mappedbno = XFS_FSB_TO_DADDR(mp, mapp[i].br_startblock);
		if (i == 0)
			*mappedbnop = mappedbno;
		nmapped = (int)XFS_FSB_TO_BB(mp, mapp[i].br_blockcount);
		switch (caller) {
		case 0:
			bp = xfs_trans_get_buf(trans, mp->m_ddev_targp,
				mappedbno, nmapped, 0);
			error = bp ? bp->b_error : XFS_ERROR(EIO);
			break;
		case 1:
		case 2:
			bp = NULL;
			error = xfs_trans_read_buf(mp, trans, mp->m_ddev_targp,
				mappedbno, nmapped, 0, &bp);
			break;
		case 3:
			xfs_buf_readahead(mp->m_ddev_targp, mappedbno, nmapped);
			error = 0;
			bp = NULL;
			break;
		}
		if (error) {
			if (bp)
				xfs_trans_brelse(trans, bp);
			goto exit1;
		}
		if (!bp)
			continue;
		if (caller == 1) {
			if (whichfork == XFS_ATTR_FORK) {
				XFS_BUF_SET_VTYPE_REF(bp, B_FS_ATTR_BTREE,
						XFS_ATTR_BTREE_REF);
			} else {
				XFS_BUF_SET_VTYPE_REF(bp, B_FS_DIR_BTREE,
						XFS_DIR_BTREE_REF);
			}
		}
		if (bplist) {
			bplist[nbplist++] = bp;
		}
	}
	/*
	 * Build a dabuf structure.
	 */
	if (bplist) {
		rbp = xfs_da_buf_make(nbplist, bplist);
	} else if (bp)
		rbp = xfs_da_buf_make(1, &bp);
	else
		rbp = NULL;
	/*
	 * For read_buf, check the magic number.
	 */
	if (caller == 1) {
		xfs_dir2_data_hdr_t	*hdr = rbp->data;
		xfs_dir2_free_t		*free = rbp->data;
		xfs_da_blkinfo_t	*info = rbp->data;
		uint			magic, magic1;

		magic = be16_to_cpu(info->magic);
		magic1 = be32_to_cpu(hdr->magic);
		if (unlikely(
		    XFS_TEST_ERROR((magic != XFS_DA_NODE_MAGIC) &&
				   (magic != XFS_ATTR_LEAF_MAGIC) &&
				   (magic != XFS_DIR2_LEAF1_MAGIC) &&
				   (magic != XFS_DIR2_LEAFN_MAGIC) &&
				   (magic1 != XFS_DIR2_BLOCK_MAGIC) &&
				   (magic1 != XFS_DIR2_DATA_MAGIC) &&
				   (free->hdr.magic != cpu_to_be32(XFS_DIR2_FREE_MAGIC)),
				mp, XFS_ERRTAG_DA_READ_BUF,
				XFS_RANDOM_DA_READ_BUF))) {
			trace_xfs_da_btree_corrupt(rbp->bps[0], _RET_IP_);
			XFS_CORRUPTION_ERROR("xfs_da_do_buf(2)",
					     XFS_ERRLEVEL_LOW, mp, info);
			error = XFS_ERROR(EFSCORRUPTED);
			xfs_da_brelse(trans, rbp);
			nbplist = 0;
			goto exit1;
		}
	}
	if (bplist) {
		kmem_free(bplist);
	}
	if (mapp != &map) {
		kmem_free(mapp);
	}
	if (bpp)
		*bpp = rbp;
	return 0;
exit1:
	if (bplist) {
		for (i = 0; i < nbplist; i++)
			xfs_trans_brelse(trans, bplist[i]);
		kmem_free(bplist);
	}
exit0:
	if (mapp != &map)
		kmem_free(mapp);
	if (bpp)
		*bpp = NULL;
	return error;
}

/*
 * Get a buffer for the dir/attr block.
 */
int
xfs_da_get_buf(
	xfs_trans_t	*trans,
	xfs_inode_t	*dp,
	xfs_dablk_t	bno,
	xfs_daddr_t	mappedbno,
	xfs_dabuf_t	**bpp,
	int		whichfork)
{
	return xfs_da_do_buf(trans, dp, bno, &mappedbno, bpp, whichfork, 0);
}

/*
 * Get a buffer for the dir/attr block, fill in the contents.
 */
int
xfs_da_read_buf(
	xfs_trans_t	*trans,
	xfs_inode_t	*dp,
	xfs_dablk_t	bno,
	xfs_daddr_t	mappedbno,
	xfs_dabuf_t	**bpp,
	int		whichfork)
{
	return xfs_da_do_buf(trans, dp, bno, &mappedbno, bpp, whichfork, 1);
}

/*
 * Readahead the dir/attr block.
 */
xfs_daddr_t
xfs_da_reada_buf(
	xfs_trans_t	*trans,
	xfs_inode_t	*dp,
	xfs_dablk_t	bno,
	int		whichfork)
{
	xfs_daddr_t	rval;

	rval = -1;
	if (xfs_da_do_buf(trans, dp, bno, &rval, NULL, whichfork, 3))
		return -1;
	else
		return rval;
}
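
/*
 * Illustrative note, not part of the original file: the wrappers above
 * select xfs_da_do_buf()'s "caller" codes -- 0 gets a new buffer, 1 reads
 * and magic-checks it, 3 only issues readahead and reports the mapped
 * daddr.  A hedged usage sketch, assuming a transaction tp and inode dp
 * already in scope:
 */
#if 0
	xfs_dabuf_t	*bp;
	xfs_daddr_t	daddr;
	int		error;

	/* Read (and magic-check) directory block 0; -1 = no known mapping. */
	error = xfs_da_read_buf(tp, dp, 0, -1, &bp, XFS_DATA_FORK);

	/* Start readahead on the next block; -1 back means a hole/error. */
	daddr = xfs_da_reada_buf(tp, dp, 1, XFS_DATA_FORK);
#endif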

kmem_zone_t *xfs_da_state_zone;	/* anchor for state struct zone */
kmem_zone_t *xfs_dabuf_zone;	/* dabuf zone */

/*
 * Allocate a dir-state structure.
 * We don't put them on the stack since they're large.
 */
xfs_da_state_t *
xfs_da_state_alloc(void)
{
	return kmem_zone_zalloc(xfs_da_state_zone, KM_NOFS);
}

/*
 * Kill the altpath contents of a da-state structure.
 */
STATIC void
xfs_da_state_kill_altpath(xfs_da_state_t *state)
{
	int	i;

	for (i = 0; i < state->altpath.active; i++) {
		if (state->altpath.blk[i].bp) {
			if (state->altpath.blk[i].bp != state->path.blk[i].bp)
				xfs_da_buf_done(state->altpath.blk[i].bp);
			state->altpath.blk[i].bp = NULL;
		}
	}
	state->altpath.active = 0;
}

/*
 * Free a da-state structure.
 */
void
xfs_da_state_free(xfs_da_state_t *state)
{
	int	i;

	xfs_da_state_kill_altpath(state);
	for (i = 0; i < state->path.active; i++) {
		if (state->path.blk[i].bp)
			xfs_da_buf_done(state->path.blk[i].bp);
	}
	if (state->extravalid && state->extrablk.bp)
		xfs_da_buf_done(state->extrablk.bp);
#ifdef DEBUG
	memset((char *)state, 0, sizeof(*state));
#endif /* DEBUG */
	kmem_zone_free(xfs_da_state_zone, state);
}
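
/*
 * Illustrative sketch, not part of the original file: da-state structures
 * are paired alloc/free around a single tree operation, and the free side
 * also releases any dabufs still held on the paths:
 */
#if 0
	xfs_da_state_t	*state;

	state = xfs_da_state_alloc();	/* zeroed, zone-backed */
	/* ... set up state->args / state->mp, walk or modify the tree ... */
	xfs_da_state_free(state);	/* drops path/altpath/extra dabufs */
#endif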

/*
 * Create a dabuf.
 */
/* ARGSUSED */
STATIC xfs_dabuf_t *
xfs_da_buf_make(int nbuf, xfs_buf_t **bps)
{
	xfs_buf_t	*bp;
	xfs_dabuf_t	*dabuf;
	int		i;
	int		off;

	if (nbuf == 1)
		dabuf = kmem_zone_alloc(xfs_dabuf_zone, KM_NOFS);
	else
		dabuf = kmem_alloc(XFS_DA_BUF_SIZE(nbuf), KM_NOFS);
	dabuf->dirty = 0;
	if (nbuf == 1) {
		dabuf->nbuf = 1;
		bp = bps[0];
		dabuf->bbcount = (short)BTOBB(XFS_BUF_COUNT(bp));
		dabuf->data = bp->b_addr;
		dabuf->bps[0] = bp;
	} else {
		dabuf->nbuf = nbuf;
		for (i = 0, dabuf->bbcount = 0; i < nbuf; i++) {
			dabuf->bps[i] = bp = bps[i];
			dabuf->bbcount += BTOBB(XFS_BUF_COUNT(bp));
		}
		dabuf->data = kmem_alloc(BBTOB(dabuf->bbcount), KM_SLEEP);
		for (i = off = 0; i < nbuf; i++, off += XFS_BUF_COUNT(bp)) {
			bp = bps[i];
			memcpy((char *)dabuf->data + off, bp->b_addr,
				XFS_BUF_COUNT(bp));
		}
	}
	return dabuf;
}
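
/*
 * Illustrative sketch, not part of the original file: for nbuf > 1 the
 * dabuf presents discontiguous buffers as one flat image by concatenating
 * their contents at running byte offsets.  The same join, standalone and
 * with hypothetical names:
 */
#if 0
#include <string.h>

static void join_buffers(char *flat, char * const *bufs, const int *len,
			 int nbuf)
{
	int i, off;

	/* Copy each underlying buffer at its cumulative byte offset. */
	for (i = 0, off = 0; i < nbuf; off += len[i], i++)
		memcpy(flat + off, bufs[i], len[i]);
}
#endif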

/*
 * Un-dirty a dabuf.
 */
STATIC void
xfs_da_buf_clean(xfs_dabuf_t *dabuf)
{
	xfs_buf_t	*bp;
	int		i;
	int		off;

	if (dabuf->dirty) {
		ASSERT(dabuf->nbuf > 1);
		dabuf->dirty = 0;
		for (i = off = 0; i < dabuf->nbuf;
				i++, off += XFS_BUF_COUNT(bp)) {
			bp = dabuf->bps[i];
			memcpy(bp->b_addr, dabuf->data + off,
				XFS_BUF_COUNT(bp));
		}
	}
}

/*
 * Release a dabuf.
 */
void
xfs_da_buf_done(xfs_dabuf_t *dabuf)
{
	ASSERT(dabuf);
	ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
	if (dabuf->dirty)
		xfs_da_buf_clean(dabuf);
	if (dabuf->nbuf > 1) {
		kmem_free(dabuf->data);
		kmem_free(dabuf);
	} else {
		kmem_zone_free(xfs_dabuf_zone, dabuf);
	}
}

/*
 * Log transaction from a dabuf.
 */
void
xfs_da_log_buf(xfs_trans_t *tp, xfs_dabuf_t *dabuf, uint first, uint last)
{
	xfs_buf_t	*bp;
	uint		f;
	int		i;
	uint		l;
	int		off;

	ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
	if (dabuf->nbuf == 1) {
		ASSERT(dabuf->data == dabuf->bps[0]->b_addr);
		xfs_trans_log_buf(tp, dabuf->bps[0], first, last);
		return;
	}
	dabuf->dirty = 1;
	ASSERT(first <= last);
	for (i = off = 0; i < dabuf->nbuf; i++, off += XFS_BUF_COUNT(bp)) {
		bp = dabuf->bps[i];
		f = off;
		l = f + XFS_BUF_COUNT(bp) - 1;
		if (f < first)
			f = first;
		if (l > last)
			l = last;
		if (f <= l)
			xfs_trans_log_buf(tp, bp, f - off, l - off);
		/*
		 * B_DONE is set by xfs_trans_log_buf.
		 * If we don't set it on a new buffer (get, not read),
		 * then if we don't put anything in the buffer it won't
		 * be set, at commit it is released into the cache,
		 * and then a read will fail.
		 */
		else if (!(XFS_BUF_ISDONE(bp)))
			XFS_BUF_DONE(bp);
	}
	ASSERT(last < off);
}
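
/*
 * Illustrative sketch, not part of the original file: for a multi-buffer
 * dabuf the dirty byte range [first, last] is clipped against each
 * underlying buffer's window and logged relative to that buffer.  The
 * clipping arithmetic, standalone (log_range() is hypothetical):
 */
#if 0
static void clip_and_log(const unsigned *len, int nbuf,
			 unsigned first, unsigned last)
{
	unsigned f, l, off;
	int i;

	for (i = 0, off = 0; i < nbuf; off += len[i], i++) {
		f = off;		/* buffer start, in dabuf bytes */
		l = off + len[i] - 1;	/* buffer end, in dabuf bytes */
		if (f < first)
			f = first;
		if (l > last)
			l = last;
		if (f <= l)		/* overlaps the dirty range */
			log_range(i, f - off, l - off);
	}
}
#endif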

/*
 * Release dabuf from a transaction.
 * Have to free up the dabuf before the buffers are released,
 * since the synchronization on the dabuf is really the lock on the buffer.
 */
void
xfs_da_brelse(xfs_trans_t *tp, xfs_dabuf_t *dabuf)
{
	xfs_buf_t	*bp;
	xfs_buf_t	**bplist;
	int		i;
	int		nbuf;

	ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
	if ((nbuf = dabuf->nbuf) == 1) {
		bplist = &bp;
		bp = dabuf->bps[0];
	} else {
		bplist = kmem_alloc(nbuf * sizeof(*bplist), KM_SLEEP);
		memcpy(bplist, dabuf->bps, nbuf * sizeof(*bplist));
	}
	xfs_da_buf_done(dabuf);
	for (i = 0; i < nbuf; i++)
		xfs_trans_brelse(tp, bplist[i]);
	if (bplist != &bp)
		kmem_free(bplist);
}

/*
 * Invalidate dabuf from a transaction.
 */
void
xfs_da_binval(xfs_trans_t *tp, xfs_dabuf_t *dabuf)
{
	xfs_buf_t	*bp;
	xfs_buf_t	**bplist;
	int		i;
	int		nbuf;

	ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
	if ((nbuf = dabuf->nbuf) == 1) {
		bplist = &bp;
		bp = dabuf->bps[0];
	} else {
		bplist = kmem_alloc(nbuf * sizeof(*bplist), KM_SLEEP);
		memcpy(bplist, dabuf->bps, nbuf * sizeof(*bplist));
	}
	xfs_da_buf_done(dabuf);
	for (i = 0; i < nbuf; i++)
		xfs_trans_binval(tp, bplist[i]);
	if (bplist != &bp)
		kmem_free(bplist);
}

/*
 * Get the first daddr from a dabuf.
 */
xfs_daddr_t
xfs_da_blkno(xfs_dabuf_t *dabuf)
{
	ASSERT(dabuf->nbuf);
	ASSERT(dabuf->data);
	return XFS_BUF_ADDR(dabuf->bps[0]);
}