2 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
20 #include "xfs_types.h"
23 #include "xfs_trans.h"
27 #include "xfs_dmapi.h"
28 #include "xfs_mount.h"
29 #include "xfs_da_btree.h"
30 #include "xfs_bmap_btree.h"
31 #include "xfs_alloc_btree.h"
32 #include "xfs_ialloc_btree.h"
33 #include "xfs_dir_sf.h"
34 #include "xfs_dir2_sf.h"
35 #include "xfs_attr_sf.h"
36 #include "xfs_dinode.h"
37 #include "xfs_inode.h"
38 #include "xfs_inode_item.h"
39 #include "xfs_alloc.h"
40 #include "xfs_btree.h"
42 #include "xfs_dir_leaf.h"
43 #include "xfs_error.h"
48 * Routines to implement leaf blocks of directories as Btrees of hashed names.
51 /*========================================================================
52 * Function prototypes for the kernel.
53 *========================================================================*/
56 * Routines used for growing the Btree.
58 STATIC
void xfs_dir_leaf_add_work(xfs_dabuf_t
*leaf_buffer
, xfs_da_args_t
*args
,
61 STATIC
int xfs_dir_leaf_compact(xfs_trans_t
*trans
, xfs_dabuf_t
*leaf_buffer
,
62 int musthave
, int justcheck
);
63 STATIC
void xfs_dir_leaf_rebalance(xfs_da_state_t
*state
,
64 xfs_da_state_blk_t
*blk1
,
65 xfs_da_state_blk_t
*blk2
);
66 STATIC
int xfs_dir_leaf_figure_balance(xfs_da_state_t
*state
,
67 xfs_da_state_blk_t
*leaf_blk_1
,
68 xfs_da_state_blk_t
*leaf_blk_2
,
69 int *number_entries_in_blk1
,
70 int *number_namebytes_in_blk1
);
72 STATIC
int xfs_dir_leaf_create(struct xfs_da_args
*args
,
73 xfs_dablk_t which_block
,
74 struct xfs_dabuf
**bpp
);
79 STATIC
void xfs_dir_leaf_moveents(xfs_dir_leafblock_t
*src_leaf
,
81 xfs_dir_leafblock_t
*dst_leaf
,
82 int dst_start
, int move_count
,
86 /*========================================================================
87 * External routines when dirsize < XFS_IFORK_DSIZE(dp).
88 *========================================================================*/
92 * Validate a given inode number.
/*
 * NOTE(review): this file is a line-shredded extraction; interior lines
 * (return type, braces, the ino_ok declaration, the EINVAL/0 returns)
 * are missing.  Comments below annotate visible fragments only; the
 * code text itself is left byte-identical.
 */
95 xfs_dir_ino_validate(xfs_mount_t
*mp
, xfs_ino_t ino
)
97 xfs_agblock_t agblkno
;
/*
 * Decompose the inode number into its allocation-group number,
 * AG-relative block number, and within-block offset.
 */
103 agno
= XFS_INO_TO_AGNO(mp
, ino
);
104 agblkno
= XFS_INO_TO_AGBNO(mp
, ino
);
105 ioff
= XFS_INO_TO_OFFSET(mp
, ino
);
106 agino
= XFS_OFFBNO_TO_AGINO(mp
, agblkno
, ioff
);
/*
 * ino_ok: every component must be in range for this mount's superblock
 * geometry AND recombining the pieces must reproduce the original ino.
 * (The left-hand side of this assignment is on a dropped line.)
 */
108 agno
< mp
->m_sb
.sb_agcount
&&
109 agblkno
< mp
->m_sb
.sb_agblocks
&&
111 ioff
< (1 << mp
->m_sb
.sb_inopblog
) &&
112 XFS_AGINO_TO_INO(mp
, agno
, agino
) == ino
;
/*
 * Error-injection-aware validity check: on failure, warn on the
 * console, file an error report, and return EFSCORRUPTED.
 */
113 if (unlikely(XFS_TEST_ERROR(!ino_ok
, mp
, XFS_ERRTAG_DIR_INO_VALIDATE
,
114 XFS_RANDOM_DIR_INO_VALIDATE
))) {
115 xfs_fs_cmn_err(CE_WARN
, mp
, "Invalid inode number 0x%Lx",
116 (unsigned long long) ino
);
117 XFS_ERROR_REPORT("xfs_dir_ino_validate", XFS_ERRLEVEL_LOW
, mp
);
118 return XFS_ERROR(EFSCORRUPTED
);
124 * Create the initial contents of a shortform directory.
/*
 * Initialize an empty shortform directory in the inode's inline data
 * fork: switch the fork to LOCAL/inline format, allocate room for just
 * the shortform header, and record the parent inode number in it.
 * NOTE(review): extraction has dropped the return type, braces, and
 * the declaration/assignment of dp; fragments are left byte-identical.
 */
127 xfs_dir_shortform_create(xfs_da_args_t
*args
, xfs_ino_t parent
)
129 xfs_dir_sf_hdr_t
*hdr
;
134 ASSERT(dp
->i_d
.di_size
== 0);
/*
 * A freshly created zero-size inode may still be marked EXTENTS
 * format; flip it to LOCAL (inline) and log the core change.
 */
135 if (dp
->i_d
.di_format
== XFS_DINODE_FMT_EXTENTS
) {
136 dp
->i_df
.if_flags
&= ~XFS_IFEXTENTS
; /* just in case */
137 dp
->i_d
.di_format
= XFS_DINODE_FMT_LOCAL
;
138 xfs_trans_log_inode(args
->trans
, dp
, XFS_ILOG_CORE
);
139 dp
->i_df
.if_flags
|= XFS_IFINLINE
;
141 ASSERT(dp
->i_df
.if_flags
& XFS_IFINLINE
);
142 ASSERT(dp
->i_df
.if_bytes
== 0);
/* Grow the inline data fork to hold exactly the shortform header. */
143 xfs_idata_realloc(dp
, sizeof(*hdr
), XFS_DATA_FORK
);
144 hdr
= (xfs_dir_sf_hdr_t
*)dp
->i_df
.if_u1
.if_data
;
/* Stash the parent inode number ("..") in the shortform header. */
145 XFS_DIR_SF_PUT_DIRINO(&parent
, &hdr
->parent
);
/* Directory size is exactly the header; log core + inline data. */
148 dp
->i_d
.di_size
= sizeof(*hdr
);
149 xfs_trans_log_inode(args
->trans
, dp
, XFS_ILOG_CORE
| XFS_ILOG_DDATA
);
154 * Add a name to the shortform directory structure.
155 * Overflow from the inode has already been checked for.
158 xfs_dir_shortform_addname(xfs_da_args_t
*args
)
160 xfs_dir_shortform_t
*sf
;
161 xfs_dir_sf_entry_t
*sfe
;
166 ASSERT(dp
->i_df
.if_flags
& XFS_IFINLINE
);
168 * Catch the case where the conversion from shortform to leaf
169 * failed part way through.
171 if (dp
->i_d
.di_size
< sizeof(xfs_dir_sf_hdr_t
)) {
172 ASSERT(XFS_FORCED_SHUTDOWN(dp
->i_mount
));
173 return XFS_ERROR(EIO
);
175 ASSERT(dp
->i_df
.if_bytes
== dp
->i_d
.di_size
);
176 ASSERT(dp
->i_df
.if_u1
.if_data
!= NULL
);
177 sf
= (xfs_dir_shortform_t
*)dp
->i_df
.if_u1
.if_data
;
179 for (i
= sf
->hdr
.count
-1; i
>= 0; i
--) {
180 if (sfe
->namelen
== args
->namelen
&&
181 args
->name
[0] == sfe
->name
[0] &&
182 memcmp(args
->name
, sfe
->name
, args
->namelen
) == 0)
183 return XFS_ERROR(EEXIST
);
184 sfe
= XFS_DIR_SF_NEXTENTRY(sfe
);
187 offset
= (int)((char *)sfe
- (char *)sf
);
188 size
= XFS_DIR_SF_ENTSIZE_BYNAME(args
->namelen
);
189 xfs_idata_realloc(dp
, size
, XFS_DATA_FORK
);
190 sf
= (xfs_dir_shortform_t
*)dp
->i_df
.if_u1
.if_data
;
191 sfe
= (xfs_dir_sf_entry_t
*)((char *)sf
+ offset
);
193 XFS_DIR_SF_PUT_DIRINO(&args
->inumber
, &sfe
->inumber
);
194 sfe
->namelen
= args
->namelen
;
195 memcpy(sfe
->name
, args
->name
, sfe
->namelen
);
198 dp
->i_d
.di_size
+= size
;
199 xfs_trans_log_inode(args
->trans
, dp
, XFS_ILOG_CORE
| XFS_ILOG_DDATA
);
205 * Remove a name from the shortform directory structure.
208 xfs_dir_shortform_removename(xfs_da_args_t
*args
)
210 xfs_dir_shortform_t
*sf
;
211 xfs_dir_sf_entry_t
*sfe
;
212 int base
, size
= 0, i
;
216 ASSERT(dp
->i_df
.if_flags
& XFS_IFINLINE
);
218 * Catch the case where the conversion from shortform to leaf
219 * failed part way through.
221 if (dp
->i_d
.di_size
< sizeof(xfs_dir_sf_hdr_t
)) {
222 ASSERT(XFS_FORCED_SHUTDOWN(dp
->i_mount
));
223 return XFS_ERROR(EIO
);
225 ASSERT(dp
->i_df
.if_bytes
== dp
->i_d
.di_size
);
226 ASSERT(dp
->i_df
.if_u1
.if_data
!= NULL
);
227 base
= sizeof(xfs_dir_sf_hdr_t
);
228 sf
= (xfs_dir_shortform_t
*)dp
->i_df
.if_u1
.if_data
;
230 for (i
= sf
->hdr
.count
-1; i
>= 0; i
--) {
231 size
= XFS_DIR_SF_ENTSIZE_BYENTRY(sfe
);
232 if (sfe
->namelen
== args
->namelen
&&
233 sfe
->name
[0] == args
->name
[0] &&
234 memcmp(sfe
->name
, args
->name
, args
->namelen
) == 0)
237 sfe
= XFS_DIR_SF_NEXTENTRY(sfe
);
240 ASSERT(args
->oknoent
);
241 return XFS_ERROR(ENOENT
);
244 if ((base
+ size
) != dp
->i_d
.di_size
) {
245 memmove(&((char *)sf
)[base
], &((char *)sf
)[base
+size
],
246 dp
->i_d
.di_size
- (base
+size
));
250 xfs_idata_realloc(dp
, -size
, XFS_DATA_FORK
);
251 dp
->i_d
.di_size
-= size
;
252 xfs_trans_log_inode(args
->trans
, dp
, XFS_ILOG_CORE
| XFS_ILOG_DDATA
);
258 * Look up a name in a shortform directory structure.
/*
 * On a match, args->inumber is filled in and EEXIST is returned (the
 * directory code uses EEXIST as "found"); ENOENT means no such entry.
 * "." and ".." are special-cased before scanning the entry list.
 * NOTE(review): extraction has dropped the return type, braces, and
 * local declarations (dp, i); code fragments are left byte-identical.
 */
261 xfs_dir_shortform_lookup(xfs_da_args_t
*args
)
263 xfs_dir_shortform_t
*sf
;
264 xfs_dir_sf_entry_t
*sfe
;
269 ASSERT(dp
->i_df
.if_flags
& XFS_IFINLINE
);
271 * Catch the case where the conversion from shortform to leaf
272 * failed part way through.
274 if (dp
->i_d
.di_size
< sizeof(xfs_dir_sf_hdr_t
)) {
275 ASSERT(XFS_FORCED_SHUTDOWN(dp
->i_mount
));
276 return XFS_ERROR(EIO
);
278 ASSERT(dp
->i_df
.if_bytes
== dp
->i_d
.di_size
);
279 ASSERT(dp
->i_df
.if_u1
.if_data
!= NULL
);
280 sf
= (xfs_dir_shortform_t
*)dp
->i_df
.if_u1
.if_data
;
/* ".." resolves to the parent inode held in the shortform header. */
281 if (args
->namelen
== 2 &&
282 args
->name
[0] == '.' && args
->name
[1] == '.') {
283 XFS_DIR_SF_GET_DIRINO(&sf
->hdr
.parent
, &args
->inumber
);
284 return(XFS_ERROR(EEXIST
));
/* "." resolves to this directory's own inode number. */
286 if (args
->namelen
== 1 && args
->name
[0] == '.') {
287 args
->inumber
= dp
->i_ino
;
288 return(XFS_ERROR(EEXIST
));
/*
 * Linear scan of the shortform entries.  NOTE(review): the line
 * initializing sfe is dropped here — presumably &sf->list[0], as in
 * the getdents scan elsewhere in this file; confirm against upstream.
 */
291 for (i
= sf
->hdr
.count
-1; i
>= 0; i
--) {
/* Cheap first-byte check before the full memcmp. */
292 if (sfe
->namelen
== args
->namelen
&&
293 sfe
->name
[0] == args
->name
[0] &&
294 memcmp(args
->name
, sfe
->name
, args
->namelen
) == 0) {
295 XFS_DIR_SF_GET_DIRINO(&sfe
->inumber
, &args
->inumber
);
296 return(XFS_ERROR(EEXIST
));
298 sfe
= XFS_DIR_SF_NEXTENTRY(sfe
);
/* Not found: only acceptable when the caller tolerates a miss. */
300 ASSERT(args
->oknoent
);
301 return(XFS_ERROR(ENOENT
));
305 * Convert from using the shortform to the leaf.
308 xfs_dir_shortform_to_leaf(xfs_da_args_t
*iargs
)
311 xfs_dir_shortform_t
*sf
;
312 xfs_dir_sf_entry_t
*sfe
;
322 * Catch the case where the conversion from shortform to leaf
323 * failed part way through.
325 if (dp
->i_d
.di_size
< sizeof(xfs_dir_sf_hdr_t
)) {
326 ASSERT(XFS_FORCED_SHUTDOWN(dp
->i_mount
));
327 return XFS_ERROR(EIO
);
329 ASSERT(dp
->i_df
.if_bytes
== dp
->i_d
.di_size
);
330 ASSERT(dp
->i_df
.if_u1
.if_data
!= NULL
);
331 size
= dp
->i_df
.if_bytes
;
332 tmpbuffer
= kmem_alloc(size
, KM_SLEEP
);
333 ASSERT(tmpbuffer
!= NULL
);
335 memcpy(tmpbuffer
, dp
->i_df
.if_u1
.if_data
, size
);
337 sf
= (xfs_dir_shortform_t
*)tmpbuffer
;
338 XFS_DIR_SF_GET_DIRINO(&sf
->hdr
.parent
, &inumber
);
340 xfs_idata_realloc(dp
, -size
, XFS_DATA_FORK
);
342 xfs_trans_log_inode(iargs
->trans
, dp
, XFS_ILOG_CORE
);
343 retval
= xfs_da_grow_inode(iargs
, &blkno
);
348 retval
= xfs_dir_leaf_create(iargs
, blkno
, &bp
);
355 args
.hashval
= xfs_dir_hash_dot
;
356 args
.inumber
= dp
->i_ino
;
358 args
.firstblock
= iargs
->firstblock
;
359 args
.flist
= iargs
->flist
;
360 args
.total
= iargs
->total
;
361 args
.whichfork
= XFS_DATA_FORK
;
362 args
.trans
= iargs
->trans
;
364 args
.addname
= args
.oknoent
= 1;
365 retval
= xfs_dir_leaf_addname(&args
);
371 args
.hashval
= xfs_dir_hash_dotdot
;
372 args
.inumber
= inumber
;
373 retval
= xfs_dir_leaf_addname(&args
);
378 for (i
= 0; i
< sf
->hdr
.count
; i
++) {
379 args
.name
= (char *)(sfe
->name
);
380 args
.namelen
= sfe
->namelen
;
381 args
.hashval
= xfs_da_hashname((char *)(sfe
->name
),
383 XFS_DIR_SF_GET_DIRINO(&sfe
->inumber
, &args
.inumber
);
384 retval
= xfs_dir_leaf_addname(&args
);
387 sfe
= XFS_DIR_SF_NEXTENTRY(sfe
);
392 kmem_free(tmpbuffer
, size
);
/*
 * qsort()-style comparator for xfs_dir_sf_sort_t records: orders by
 * hash value first, then by entry number (entno) to keep duplicate
 * hashes in a stable, deterministic sequence for getdents cookies.
 * NOTE(review): the "return -1;" / "return 1;" bodies of the two
 * comparison branches are on lines dropped by this extraction.
 */
397 xfs_dir_shortform_compare(const void *a
, const void *b
)
399 xfs_dir_sf_sort_t
*sa
, *sb
;
401 sa
= (xfs_dir_sf_sort_t
*)a
;
402 sb
= (xfs_dir_sf_sort_t
*)b
;
403 if (sa
->hash
< sb
->hash
)
405 else if (sa
->hash
> sb
->hash
)
/* Equal hashes: fall back to entry-number ordering (tie-break). */
408 return sa
->entno
- sb
->entno
;
412 * Copy out directory entries for getdents(), for shortform directories.
416 xfs_dir_shortform_getdents(xfs_inode_t
*dp
, uio_t
*uio
, int *eofp
,
417 xfs_dirent_t
*dbp
, xfs_dir_put_t put
)
419 xfs_dir_shortform_t
*sf
;
420 xfs_dir_sf_entry_t
*sfe
;
421 int retval
, i
, sbsize
, nsbuf
, lastresid
=0, want_entno
;
423 xfs_dahash_t cookhash
, hash
;
424 xfs_dir_put_args_t p
;
425 xfs_dir_sf_sort_t
*sbuf
, *sbp
;
428 sf
= (xfs_dir_shortform_t
*)dp
->i_df
.if_u1
.if_data
;
429 cookhash
= XFS_DA_COOKIE_HASH(mp
, uio
->uio_offset
);
430 want_entno
= XFS_DA_COOKIE_ENTRY(mp
, uio
->uio_offset
);
431 nsbuf
= sf
->hdr
.count
+ 2;
432 sbsize
= (nsbuf
+ 1) * sizeof(*sbuf
);
433 sbp
= sbuf
= kmem_alloc(sbsize
, KM_SLEEP
);
435 xfs_dir_trace_g_du("sf: start", dp
, uio
);
438 * Collect all the entries into the buffer.
443 sbp
->hash
= xfs_dir_hash_dot
;
444 sbp
->ino
= dp
->i_ino
;
454 sbp
->hash
= xfs_dir_hash_dotdot
;
455 sbp
->ino
= XFS_GET_DIR_INO8(sf
->hdr
.parent
);
461 * Scan the directory data for the rest of the entries.
463 for (i
= 0, sfe
= &sf
->list
[0]; i
< sf
->hdr
.count
; i
++) {
466 ((char *)sfe
< (char *)sf
) ||
467 ((char *)sfe
>= ((char *)sf
+ dp
->i_df
.if_bytes
)))) {
468 xfs_dir_trace_g_du("sf: corrupted", dp
, uio
);
469 XFS_CORRUPTION_ERROR("xfs_dir_shortform_getdents",
470 XFS_ERRLEVEL_LOW
, mp
, sfe
);
471 kmem_free(sbuf
, sbsize
);
472 return XFS_ERROR(EFSCORRUPTED
);
477 sbp
->hash
= xfs_da_hashname((char *)sfe
->name
, sfe
->namelen
);
478 sbp
->ino
= XFS_GET_DIR_INO8(sfe
->inumber
);
479 sbp
->name
= (char *)sfe
->name
;
480 sbp
->namelen
= sfe
->namelen
;
481 sfe
= XFS_DIR_SF_NEXTENTRY(sfe
);
486 * Sort the entries on hash then entno.
488 xfs_sort(sbuf
, nsbuf
, sizeof(*sbuf
), xfs_dir_shortform_compare
);
490 * Stuff in last entry.
493 sbp
->hash
= XFS_DA_MAXHASH
;
496 * Figure out the sequence numbers in case there's a hash duplicate.
498 for (hash
= sbuf
->hash
, sbp
= sbuf
+ 1;
499 sbp
< &sbuf
[nsbuf
+ 1]; sbp
++) {
500 if (sbp
->hash
== hash
)
501 sbp
->seqno
= sbp
[-1].seqno
+ 1;
507 * Set up put routine.
516 for (sbp
= sbuf
; sbp
< &sbuf
[nsbuf
+ 1]; sbp
++) {
517 if (sbp
->hash
> cookhash
||
518 (sbp
->hash
== cookhash
&& sbp
->seqno
>= want_entno
))
523 * Did we fail to find anything? We stop at the last entry,
524 * the one we put maxhash into.
526 if (sbp
== &sbuf
[nsbuf
]) {
527 kmem_free(sbuf
, sbsize
);
528 xfs_dir_trace_g_du("sf: hash beyond end", dp
, uio
);
529 uio
->uio_offset
= XFS_DA_MAKE_COOKIE(mp
, 0, 0, XFS_DA_MAXHASH
);
535 * Loop putting entries into the user buffer.
537 while (sbp
< &sbuf
[nsbuf
]) {
539 * Save the first resid in a run of equal-hashval entries
540 * so that we can back them out if they don't all fit.
542 if (sbp
->seqno
== 0 || sbp
== sbuf
)
543 lastresid
= uio
->uio_resid
;
544 XFS_PUT_COOKIE(p
.cook
, mp
, 0, sbp
[1].seqno
, sbp
[1].hash
);
547 p
.ino
+= mp
->m_inoadd
;
550 p
.namelen
= sbp
->namelen
;
554 XFS_DA_MAKE_COOKIE(mp
, 0, 0, sbp
->hash
);
555 kmem_free(sbuf
, sbsize
);
556 uio
->uio_resid
= lastresid
;
557 xfs_dir_trace_g_du("sf: E-O-B", dp
, uio
);
562 kmem_free(sbuf
, sbsize
);
563 uio
->uio_offset
= p
.cook
.o
;
565 xfs_dir_trace_g_du("sf: E-O-F", dp
, uio
);
570 * Look up a name in a shortform directory structure, replace the inode number.
573 xfs_dir_shortform_replace(xfs_da_args_t
*args
)
575 xfs_dir_shortform_t
*sf
;
576 xfs_dir_sf_entry_t
*sfe
;
581 ASSERT(dp
->i_df
.if_flags
& XFS_IFINLINE
);
583 * Catch the case where the conversion from shortform to leaf
584 * failed part way through.
586 if (dp
->i_d
.di_size
< sizeof(xfs_dir_sf_hdr_t
)) {
587 ASSERT(XFS_FORCED_SHUTDOWN(dp
->i_mount
));
588 return XFS_ERROR(EIO
);
590 ASSERT(dp
->i_df
.if_bytes
== dp
->i_d
.di_size
);
591 ASSERT(dp
->i_df
.if_u1
.if_data
!= NULL
);
592 sf
= (xfs_dir_shortform_t
*)dp
->i_df
.if_u1
.if_data
;
593 if (args
->namelen
== 2 &&
594 args
->name
[0] == '.' && args
->name
[1] == '.') {
595 /* XXX - replace assert? */
596 XFS_DIR_SF_PUT_DIRINO(&args
->inumber
, &sf
->hdr
.parent
);
597 xfs_trans_log_inode(args
->trans
, dp
, XFS_ILOG_DDATA
);
600 ASSERT(args
->namelen
!= 1 || args
->name
[0] != '.');
602 for (i
= sf
->hdr
.count
-1; i
>= 0; i
--) {
603 if (sfe
->namelen
== args
->namelen
&&
604 sfe
->name
[0] == args
->name
[0] &&
605 memcmp(args
->name
, sfe
->name
, args
->namelen
) == 0) {
606 ASSERT(memcmp((char *)&args
->inumber
,
607 (char *)&sfe
->inumber
, sizeof(xfs_ino_t
)));
608 XFS_DIR_SF_PUT_DIRINO(&args
->inumber
, &sfe
->inumber
);
609 xfs_trans_log_inode(args
->trans
, dp
, XFS_ILOG_DDATA
);
612 sfe
= XFS_DIR_SF_NEXTENTRY(sfe
);
614 ASSERT(args
->oknoent
);
615 return XFS_ERROR(ENOENT
);
619 * Convert a leaf directory to shortform structure
622 xfs_dir_leaf_to_shortform(xfs_da_args_t
*iargs
)
624 xfs_dir_leafblock_t
*leaf
;
625 xfs_dir_leaf_hdr_t
*hdr
;
626 xfs_dir_leaf_entry_t
*entry
;
627 xfs_dir_leaf_name_t
*namest
;
630 xfs_ino_t parent
= 0;
636 tmpbuffer
= kmem_alloc(XFS_LBSIZE(dp
->i_mount
), KM_SLEEP
);
637 ASSERT(tmpbuffer
!= NULL
);
639 retval
= xfs_da_read_buf(iargs
->trans
, iargs
->dp
, 0, -1, &bp
,
644 memcpy(tmpbuffer
, bp
->data
, XFS_LBSIZE(dp
->i_mount
));
645 leaf
= (xfs_dir_leafblock_t
*)tmpbuffer
;
646 ASSERT(be16_to_cpu(leaf
->hdr
.info
.magic
) == XFS_DIR_LEAF_MAGIC
);
647 memset(bp
->data
, 0, XFS_LBSIZE(dp
->i_mount
));
650 * Find and special case the parent inode number
653 entry
= &leaf
->entries
[0];
654 for (i
= be16_to_cpu(hdr
->count
)-1; i
>= 0; entry
++, i
--) {
655 namest
= XFS_DIR_LEAF_NAMESTRUCT(leaf
, be16_to_cpu(entry
->nameidx
));
656 if ((entry
->namelen
== 2) &&
657 (namest
->name
[0] == '.') &&
658 (namest
->name
[1] == '.')) {
659 XFS_DIR_SF_GET_DIRINO(&namest
->inumber
, &parent
);
661 } else if ((entry
->namelen
== 1) && (namest
->name
[0] == '.')) {
665 retval
= xfs_da_shrink_inode(iargs
, 0, bp
);
668 retval
= xfs_dir_shortform_create(iargs
, parent
);
673 * Copy the rest of the filenames
675 entry
= &leaf
->entries
[0];
677 args
.firstblock
= iargs
->firstblock
;
678 args
.flist
= iargs
->flist
;
679 args
.total
= iargs
->total
;
680 args
.whichfork
= XFS_DATA_FORK
;
681 args
.trans
= iargs
->trans
;
683 args
.addname
= args
.oknoent
= 1;
684 for (i
= 0; i
< be16_to_cpu(hdr
->count
); entry
++, i
++) {
687 namest
= XFS_DIR_LEAF_NAMESTRUCT(leaf
, be16_to_cpu(entry
->nameidx
));
688 args
.name
= (char *)(namest
->name
);
689 args
.namelen
= entry
->namelen
;
690 args
.hashval
= be32_to_cpu(entry
->hashval
);
691 XFS_DIR_SF_GET_DIRINO(&namest
->inumber
, &args
.inumber
);
692 xfs_dir_shortform_addname(&args
);
696 kmem_free(tmpbuffer
, XFS_LBSIZE(dp
->i_mount
));
701 * Convert from using a single leaf to a root node and a leaf.
/*
 * The existing leaf (logical block 0) is copied to a newly grown
 * block 1, and a da-btree root node is built at block 0 with a single
 * entry pointing at the relocated leaf.  NOTE(review): extraction has
 * dropped the return type, braces, declarations (dp, blkno, retval),
 * and the error-return checks after each call; fragments are left
 * byte-identical.
 */
704 xfs_dir_leaf_to_node(xfs_da_args_t
*args
)
706 xfs_dir_leafblock_t
*leaf
;
707 xfs_da_intnode_t
*node
;
709 xfs_dabuf_t
*bp1
, *bp2
;
/* Grow the directory by one block; blkno receives its logical block. */
714 retval
= xfs_da_grow_inode(args
, &blkno
);
/* Read the current leaf at block 0 and get a buffer for block 1. */
718 retval
= xfs_da_read_buf(args
->trans
, args
->dp
, 0, -1, &bp1
,
723 retval
= xfs_da_get_buf(args
->trans
, args
->dp
, 1, -1, &bp2
,
726 xfs_da_buf_done(bp1
);
/* Copy the leaf block into its new home and log the whole block. */
730 memcpy(bp2
->data
, bp1
->data
, XFS_LBSIZE(dp
->i_mount
));
731 xfs_da_buf_done(bp1
);
732 xfs_da_log_buf(args
->trans
, bp2
, 0, XFS_LBSIZE(dp
->i_mount
) - 1);
735 * Set up the new root node.
737 retval
= xfs_da_node_create(args
, 0, 1, &bp1
, XFS_DATA_FORK
);
739 xfs_da_buf_done(bp2
);
744 ASSERT(be16_to_cpu(leaf
->hdr
.info
.magic
) == XFS_DIR_LEAF_MAGIC
);
/*
 * Root's single btree entry: the leaf's last (highest) hashval,
 * pointing at the leaf's new block.  NOTE(review): hashval is copied
 * raw (both sides are on-disk big-endian) while `before`/`count` go
 * through cpu_to_be*; confirm against upstream before changing.
 */
745 node
->btree
[0].hashval
= leaf
->entries
[be16_to_cpu(leaf
->hdr
.count
)-1].hashval
;
746 xfs_da_buf_done(bp2
);
747 node
->btree
[0].before
= cpu_to_be32(blkno
);
748 node
->hdr
.count
= cpu_to_be16(1);
/* Log only the modified btree entry range of the root node. */
749 xfs_da_log_buf(args
->trans
, bp1
,
750 XFS_DA_LOGRANGE(node
, &node
->btree
[0], sizeof(node
->btree
[0])));
751 xfs_da_buf_done(bp1
);
757 /*========================================================================
758 * Routines used for growing the Btree.
759 *========================================================================*/
762 * Create the initial contents of a leaf directory
763 * or a leaf in a node directory.
/*
 * Get a buffer for the given logical block, zero it, and initialize
 * the leaf header: magic number, firstused (the lowest name-area
 * offset, initially the end of the block), and a single freemap entry
 * covering everything after the header.  NOTE(review): extraction has
 * dropped the return type, braces, declarations (dp, bp, retval), the
 * leaf/hdr pointer setup, and *bpp assignment; fragments are left
 * byte-identical.
 */
766 xfs_dir_leaf_create(xfs_da_args_t
*args
, xfs_dablk_t blkno
, xfs_dabuf_t
**bpp
)
768 xfs_dir_leafblock_t
*leaf
;
769 xfs_dir_leaf_hdr_t
*hdr
;
776 retval
= xfs_da_get_buf(args
->trans
, dp
, blkno
, -1, &bp
, XFS_DATA_FORK
);
/* Start from a fully zeroed block. */
781 memset((char *)leaf
, 0, XFS_LBSIZE(dp
->i_mount
));
783 hdr
->info
.magic
= cpu_to_be16(XFS_DIR_LEAF_MAGIC
);
784 hdr
->firstused
= cpu_to_be16(XFS_LBSIZE(dp
->i_mount
));
/*
 * NOTE(review): the guard between these two firstused assignments is
 * on a dropped line — the second caps firstused when the block size
 * doesn't fit in the 16-bit field; confirm against upstream.
 */
786 hdr
->firstused
= cpu_to_be16(XFS_LBSIZE(dp
->i_mount
) - 1);
/* One free region spanning from just past the header to firstused. */
787 hdr
->freemap
[0].base
= cpu_to_be16(sizeof(xfs_dir_leaf_hdr_t
));
788 hdr
->freemap
[0].size
= cpu_to_be16(be16_to_cpu(hdr
->firstused
) -
789 be16_to_cpu(hdr
->freemap
[0].base
));
/* Log the entire freshly initialized block. */
791 xfs_da_log_buf(args
->trans
, bp
, 0, XFS_LBSIZE(dp
->i_mount
) - 1);
798 * Split the leaf node, rebalance, then add the new entry.
/*
 * Grow the directory by one block, create a fresh leaf there, link it
 * into the da-btree state, rebalance entries between old and new
 * leaves, then insert the pending name into whichever leaf the
 * rebalance selected.  NOTE(review): extraction has dropped the return
 * type, braces, declarations (args, blkno, error), the error-return
 * checks, and the state->inleaf if/else around the two adds; fragments
 * are left byte-identical.
 */
801 xfs_dir_leaf_split(xfs_da_state_t
*state
, xfs_da_state_blk_t
*oldblk
,
802 xfs_da_state_blk_t
*newblk
)
809 * Allocate space for a new leaf node.
812 ASSERT(args
!= NULL
);
813 ASSERT(oldblk
->magic
== XFS_DIR_LEAF_MAGIC
);
814 error
= xfs_da_grow_inode(args
, &blkno
);
817 error
= xfs_dir_leaf_create(args
, blkno
, &newblk
->bp
);
820 newblk
->blkno
= blkno
;
821 newblk
->magic
= XFS_DIR_LEAF_MAGIC
;
824 * Rebalance the entries across the two leaves.
/* Rebalance also decides (state->inleaf) which leaf gets the insert. */
826 xfs_dir_leaf_rebalance(state
, oldblk
, newblk
);
827 error
= xfs_da_blk_link(state
, oldblk
, newblk
);
832 * Insert the new entry in the correct block.
/*
 * NOTE(review): the if/else on state->inleaf selecting between these
 * two adds is on dropped lines; only one of them executes per call.
 */
835 error
= xfs_dir_leaf_add(oldblk
->bp
, args
, oldblk
->index
);
837 error
= xfs_dir_leaf_add(newblk
->bp
, args
, newblk
->index
);
841 * Update last hashval in each block since we added the name.
843 oldblk
->hashval
= xfs_dir_leaf_lasthash(oldblk
->bp
, NULL
);
844 newblk
->hashval
= xfs_dir_leaf_lasthash(newblk
->bp
, NULL
);
849 * Add a name to the leaf directory structure.
851 * Must take into account fragmented leaves and leaves where spacemap has
852 * lost some freespace information (ie: holes).
855 xfs_dir_leaf_add(xfs_dabuf_t
*bp
, xfs_da_args_t
*args
, int index
)
857 xfs_dir_leafblock_t
*leaf
;
858 xfs_dir_leaf_hdr_t
*hdr
;
859 xfs_dir_leaf_map_t
*map
;
860 int tablesize
, entsize
, sum
, i
, tmp
, error
;
863 ASSERT(be16_to_cpu(leaf
->hdr
.info
.magic
) == XFS_DIR_LEAF_MAGIC
);
864 ASSERT((index
>= 0) && (index
<= be16_to_cpu(leaf
->hdr
.count
)));
866 entsize
= XFS_DIR_LEAF_ENTSIZE_BYNAME(args
->namelen
);
869 * Search through freemap for first-fit on new name length.
870 * (may need to figure in size of entry struct too)
872 tablesize
= (be16_to_cpu(hdr
->count
) + 1) *
873 sizeof(xfs_dir_leaf_entry_t
) + sizeof(xfs_dir_leaf_hdr_t
);
874 map
= &hdr
->freemap
[XFS_DIR_LEAF_MAPSIZE
-1];
875 for (sum
= 0, i
= XFS_DIR_LEAF_MAPSIZE
-1; i
>= 0; map
--, i
--) {
876 if (tablesize
> be16_to_cpu(hdr
->firstused
)) {
877 sum
+= be16_to_cpu(map
->size
);
881 continue; /* no space in this map */
883 if (be16_to_cpu(map
->base
) < be16_to_cpu(hdr
->firstused
))
884 tmp
+= (uint
)sizeof(xfs_dir_leaf_entry_t
);
885 if (be16_to_cpu(map
->size
) >= tmp
) {
886 if (!args
->justcheck
)
887 xfs_dir_leaf_add_work(bp
, args
, index
, i
);
890 sum
+= be16_to_cpu(map
->size
);
894 * If there are no holes in the address space of the block,
895 * and we don't have enough freespace, then compaction will do us
896 * no good and we should just give up.
898 if (!hdr
->holes
&& (sum
< entsize
))
899 return XFS_ERROR(ENOSPC
);
902 * Compact the entries to coalesce free space.
903 * Pass the justcheck flag so the checking pass can return
904 * an error, without changing anything, if it won't fit.
906 error
= xfs_dir_leaf_compact(args
->trans
, bp
,
909 (uint
)sizeof(xfs_dir_leaf_entry_t
) : 0,
914 * After compaction, the block is guaranteed to have only one
915 * free region, in freemap[0]. If it is not big enough, give up.
917 if (be16_to_cpu(hdr
->freemap
[0].size
) <
918 (entsize
+ (uint
)sizeof(xfs_dir_leaf_entry_t
)))
919 return XFS_ERROR(ENOSPC
);
921 if (!args
->justcheck
)
922 xfs_dir_leaf_add_work(bp
, args
, index
, 0);
927 * Add a name to a leaf directory structure.
930 xfs_dir_leaf_add_work(xfs_dabuf_t
*bp
, xfs_da_args_t
*args
, int index
,
933 xfs_dir_leafblock_t
*leaf
;
934 xfs_dir_leaf_hdr_t
*hdr
;
935 xfs_dir_leaf_entry_t
*entry
;
936 xfs_dir_leaf_name_t
*namest
;
937 xfs_dir_leaf_map_t
*map
;
943 ASSERT(be16_to_cpu(leaf
->hdr
.info
.magic
) == XFS_DIR_LEAF_MAGIC
);
945 ASSERT((mapindex
>= 0) && (mapindex
< XFS_DIR_LEAF_MAPSIZE
));
946 ASSERT((index
>= 0) && (index
<= be16_to_cpu(hdr
->count
)));
949 * Force open some space in the entry array and fill it in.
951 entry
= &leaf
->entries
[index
];
952 if (index
< be16_to_cpu(hdr
->count
)) {
953 tmp
= be16_to_cpu(hdr
->count
) - index
;
954 tmp
*= (uint
)sizeof(xfs_dir_leaf_entry_t
);
955 memmove(entry
+ 1, entry
, tmp
);
956 xfs_da_log_buf(args
->trans
, bp
,
957 XFS_DA_LOGRANGE(leaf
, entry
, tmp
+ (uint
)sizeof(*entry
)));
959 be16_add(&hdr
->count
, 1);
962 * Allocate space for the new string (at the end of the run).
964 map
= &hdr
->freemap
[mapindex
];
965 mp
= args
->trans
->t_mountp
;
966 ASSERT(be16_to_cpu(map
->base
) < XFS_LBSIZE(mp
));
967 ASSERT(be16_to_cpu(map
->size
) >= XFS_DIR_LEAF_ENTSIZE_BYNAME(args
->namelen
));
968 ASSERT(be16_to_cpu(map
->size
) < XFS_LBSIZE(mp
));
970 be16_add(&map
->size
, -(XFS_DIR_LEAF_ENTSIZE_BYNAME(args
->namelen
)));
971 entry
->nameidx
= cpu_to_be16(be16_to_cpu(map
->base
) +
972 be16_to_cpu(map
->size
));
973 entry
->hashval
= cpu_to_be32(args
->hashval
);
974 entry
->namelen
= args
->namelen
;
975 xfs_da_log_buf(args
->trans
, bp
,
976 XFS_DA_LOGRANGE(leaf
, entry
, sizeof(*entry
)));
979 * Copy the string and inode number into the new space.
981 namest
= XFS_DIR_LEAF_NAMESTRUCT(leaf
, be16_to_cpu(entry
->nameidx
));
982 XFS_DIR_SF_PUT_DIRINO(&args
->inumber
, &namest
->inumber
);
983 memcpy(namest
->name
, args
->name
, args
->namelen
);
984 xfs_da_log_buf(args
->trans
, bp
,
985 XFS_DA_LOGRANGE(leaf
, namest
, XFS_DIR_LEAF_ENTSIZE_BYENTRY(entry
)));
988 * Update the control info for this leaf node
990 if (be16_to_cpu(entry
->nameidx
) < be16_to_cpu(hdr
->firstused
))
991 hdr
->firstused
= entry
->nameidx
;
992 ASSERT(be16_to_cpu(hdr
->firstused
) >=
993 ((be16_to_cpu(hdr
->count
)*sizeof(*entry
))+sizeof(*hdr
)));
994 tmp
= (be16_to_cpu(hdr
->count
)-1) * (uint
)sizeof(xfs_dir_leaf_entry_t
)
995 + (uint
)sizeof(xfs_dir_leaf_hdr_t
);
996 map
= &hdr
->freemap
[0];
997 for (i
= 0; i
< XFS_DIR_LEAF_MAPSIZE
; map
++, i
++) {
998 if (be16_to_cpu(map
->base
) == tmp
) {
999 int entry_size
= sizeof(xfs_dir_leaf_entry_t
);
1000 be16_add(&map
->base
, entry_size
);
1001 be16_add(&map
->size
, -entry_size
);
1004 be16_add(&hdr
->namebytes
, args
->namelen
);
1005 xfs_da_log_buf(args
->trans
, bp
,
1006 XFS_DA_LOGRANGE(leaf
, hdr
, sizeof(*hdr
)));
1010 * Garbage collect a leaf directory block by copying it to a new buffer.
1013 xfs_dir_leaf_compact(xfs_trans_t
*trans
, xfs_dabuf_t
*bp
, int musthave
,
1016 xfs_dir_leafblock_t
*leaf_s
, *leaf_d
;
1017 xfs_dir_leaf_hdr_t
*hdr_s
, *hdr_d
;
1020 char *tmpbuffer2
=NULL
;
1024 mp
= trans
->t_mountp
;
1025 lbsize
= XFS_LBSIZE(mp
);
1026 tmpbuffer
= kmem_alloc(lbsize
, KM_SLEEP
);
1027 ASSERT(tmpbuffer
!= NULL
);
1028 memcpy(tmpbuffer
, bp
->data
, lbsize
);
1031 * Make a second copy in case xfs_dir_leaf_moveents()
1032 * below destroys the original.
1034 if (musthave
|| justcheck
) {
1035 tmpbuffer2
= kmem_alloc(lbsize
, KM_SLEEP
);
1036 memcpy(tmpbuffer2
, bp
->data
, lbsize
);
1038 memset(bp
->data
, 0, lbsize
);
1041 * Copy basic information
1043 leaf_s
= (xfs_dir_leafblock_t
*)tmpbuffer
;
1045 hdr_s
= &leaf_s
->hdr
;
1046 hdr_d
= &leaf_d
->hdr
;
1047 hdr_d
->info
= hdr_s
->info
; /* struct copy */
1048 hdr_d
->firstused
= cpu_to_be16(lbsize
);
1049 if (!hdr_d
->firstused
)
1050 hdr_d
->firstused
= cpu_to_be16(lbsize
- 1);
1051 hdr_d
->namebytes
= 0;
1054 hdr_d
->freemap
[0].base
= cpu_to_be16(sizeof(xfs_dir_leaf_hdr_t
));
1055 hdr_d
->freemap
[0].size
= cpu_to_be16(be16_to_cpu(hdr_d
->firstused
) -
1056 be16_to_cpu(hdr_d
->freemap
[0].base
));
1059 * Copy all entry's in the same (sorted) order,
1060 * but allocate filenames packed and in sequence.
1061 * This changes the source (leaf_s) as well.
1063 xfs_dir_leaf_moveents(leaf_s
, 0, leaf_d
, 0, be16_to_cpu(hdr_s
->count
), mp
);
1065 if (musthave
&& be16_to_cpu(hdr_d
->freemap
[0].size
) < musthave
)
1066 rval
= XFS_ERROR(ENOSPC
);
1070 if (justcheck
|| rval
== ENOSPC
) {
1072 memcpy(bp
->data
, tmpbuffer2
, lbsize
);
1074 xfs_da_log_buf(trans
, bp
, 0, lbsize
- 1);
1077 kmem_free(tmpbuffer
, lbsize
);
1078 if (musthave
|| justcheck
)
1079 kmem_free(tmpbuffer2
, lbsize
);
1084 * Redistribute the directory entries between two leaf nodes,
1085 * taking into account the size of the new entry.
1087 * NOTE: if new block is empty, then it will get the upper half of old block.
1090 xfs_dir_leaf_rebalance(xfs_da_state_t
*state
, xfs_da_state_blk_t
*blk1
,
1091 xfs_da_state_blk_t
*blk2
)
1093 xfs_da_state_blk_t
*tmp_blk
;
1094 xfs_dir_leafblock_t
*leaf1
, *leaf2
;
1095 xfs_dir_leaf_hdr_t
*hdr1
, *hdr2
;
1096 int count
, totallen
, max
, space
, swap
;
1099 * Set up environment.
1101 ASSERT(blk1
->magic
== XFS_DIR_LEAF_MAGIC
);
1102 ASSERT(blk2
->magic
== XFS_DIR_LEAF_MAGIC
);
1103 leaf1
= blk1
->bp
->data
;
1104 leaf2
= blk2
->bp
->data
;
1105 ASSERT(be16_to_cpu(leaf1
->hdr
.info
.magic
) == XFS_DIR_LEAF_MAGIC
);
1106 ASSERT(be16_to_cpu(leaf2
->hdr
.info
.magic
) == XFS_DIR_LEAF_MAGIC
);
1109 * Check ordering of blocks, reverse if it makes things simpler.
1112 if (xfs_dir_leaf_order(blk1
->bp
, blk2
->bp
)) {
1116 leaf1
= blk1
->bp
->data
;
1117 leaf2
= blk2
->bp
->data
;
1124 * Examine entries until we reduce the absolute difference in
1125 * byte usage between the two blocks to a minimum. Then get
1126 * the direction to copy and the number of elements to move.
1128 state
->inleaf
= xfs_dir_leaf_figure_balance(state
, blk1
, blk2
,
1131 state
->inleaf
= !state
->inleaf
;
1134 * Move any entries required from leaf to leaf:
1136 if (count
< be16_to_cpu(hdr1
->count
)) {
1138 * Figure the total bytes to be added to the destination leaf.
1140 count
= be16_to_cpu(hdr1
->count
) - count
; /* number entries being moved */
1141 space
= be16_to_cpu(hdr1
->namebytes
) - totallen
;
1142 space
+= count
* ((uint
)sizeof(xfs_dir_leaf_name_t
)-1);
1143 space
+= count
* (uint
)sizeof(xfs_dir_leaf_entry_t
);
1146 * leaf2 is the destination, compact it if it looks tight.
1148 max
= be16_to_cpu(hdr2
->firstused
) - (uint
)sizeof(xfs_dir_leaf_hdr_t
);
1149 max
-= be16_to_cpu(hdr2
->count
) * (uint
)sizeof(xfs_dir_leaf_entry_t
);
1151 xfs_dir_leaf_compact(state
->args
->trans
, blk2
->bp
,
1156 * Move high entries from leaf1 to low end of leaf2.
1158 xfs_dir_leaf_moveents(leaf1
, be16_to_cpu(hdr1
->count
) - count
,
1159 leaf2
, 0, count
, state
->mp
);
1161 xfs_da_log_buf(state
->args
->trans
, blk1
->bp
, 0,
1162 state
->blocksize
-1);
1163 xfs_da_log_buf(state
->args
->trans
, blk2
->bp
, 0,
1164 state
->blocksize
-1);
1166 } else if (count
> be16_to_cpu(hdr1
->count
)) {
1168 * Figure the total bytes to be added to the destination leaf.
1170 count
-= be16_to_cpu(hdr1
->count
); /* number entries being moved */
1171 space
= totallen
- be16_to_cpu(hdr1
->namebytes
);
1172 space
+= count
* ((uint
)sizeof(xfs_dir_leaf_name_t
)-1);
1173 space
+= count
* (uint
)sizeof(xfs_dir_leaf_entry_t
);
1176 * leaf1 is the destination, compact it if it looks tight.
1178 max
= be16_to_cpu(hdr1
->firstused
) - (uint
)sizeof(xfs_dir_leaf_hdr_t
);
1179 max
-= be16_to_cpu(hdr1
->count
) * (uint
)sizeof(xfs_dir_leaf_entry_t
);
1181 xfs_dir_leaf_compact(state
->args
->trans
, blk1
->bp
,
1186 * Move low entries from leaf2 to high end of leaf1.
1188 xfs_dir_leaf_moveents(leaf2
, 0, leaf1
, be16_to_cpu(hdr1
->count
),
1191 xfs_da_log_buf(state
->args
->trans
, blk1
->bp
, 0,
1192 state
->blocksize
-1);
1193 xfs_da_log_buf(state
->args
->trans
, blk2
->bp
, 0,
1194 state
->blocksize
-1);
1198 * Copy out last hashval in each block for B-tree code.
1200 blk1
->hashval
= be32_to_cpu(leaf1
->entries
[
1201 be16_to_cpu(leaf1
->hdr
.count
)-1].hashval
);
1202 blk2
->hashval
= be32_to_cpu(leaf2
->entries
[
1203 be16_to_cpu(leaf2
->hdr
.count
)-1].hashval
);
1206 * Adjust the expected index for insertion.
1207 * GROT: this doesn't work unless blk2 was originally empty.
1209 if (!state
->inleaf
) {
1210 blk2
->index
= blk1
->index
- be16_to_cpu(leaf1
->hdr
.count
);
1215 * Examine entries until we reduce the absolute difference in
1216 * byte usage between the two blocks to a minimum.
1217 * GROT: Is this really necessary? With other than a 512 byte blocksize,
1218 * GROT: there will always be enough room in either block for a new entry.
1219 * GROT: Do a double-split for this case?
1222 xfs_dir_leaf_figure_balance(xfs_da_state_t
*state
,
1223 xfs_da_state_blk_t
*blk1
,
1224 xfs_da_state_blk_t
*blk2
,
1225 int *countarg
, int *namebytesarg
)
1227 xfs_dir_leafblock_t
*leaf1
, *leaf2
;
1228 xfs_dir_leaf_hdr_t
*hdr1
, *hdr2
;
1229 xfs_dir_leaf_entry_t
*entry
;
1230 int count
, max
, totallen
, half
;
1231 int lastdelta
, foundit
, tmp
;
1234 * Set up environment.
1236 leaf1
= blk1
->bp
->data
;
1237 leaf2
= blk2
->bp
->data
;
1244 * Examine entries until we reduce the absolute difference in
1245 * byte usage between the two blocks to a minimum.
1247 max
= be16_to_cpu(hdr1
->count
) + be16_to_cpu(hdr2
->count
);
1248 half
= (max
+1) * (uint
)(sizeof(*entry
)+sizeof(xfs_dir_leaf_entry_t
)-1);
1249 half
+= be16_to_cpu(hdr1
->namebytes
) + be16_to_cpu(hdr2
->namebytes
) +
1250 state
->args
->namelen
;
1252 lastdelta
= state
->blocksize
;
1253 entry
= &leaf1
->entries
[0];
1254 for (count
= 0; count
< max
; entry
++, count
++) {
1256 #define XFS_DIR_ABS(A) (((A) < 0) ? -(A) : (A))
1258 * The new entry is in the first block, account for it.
1260 if (count
== blk1
->index
) {
1261 tmp
= totallen
+ (uint
)sizeof(*entry
)
1262 + XFS_DIR_LEAF_ENTSIZE_BYNAME(state
->args
->namelen
);
1263 if (XFS_DIR_ABS(half
- tmp
) > lastdelta
)
1265 lastdelta
= XFS_DIR_ABS(half
- tmp
);
1271 * Wrap around into the second block if necessary.
1273 if (count
== be16_to_cpu(hdr1
->count
)) {
1275 entry
= &leaf1
->entries
[0];
1279 * Figure out if next leaf entry would be too much.
1281 tmp
= totallen
+ (uint
)sizeof(*entry
)
1282 + XFS_DIR_LEAF_ENTSIZE_BYENTRY(entry
);
1283 if (XFS_DIR_ABS(half
- tmp
) > lastdelta
)
1285 lastdelta
= XFS_DIR_ABS(half
- tmp
);
1291 * Calculate the number of namebytes that will end up in lower block.
1292 * If new entry not in lower block, fix up the count.
1295 count
* (uint
)(sizeof(*entry
)+sizeof(xfs_dir_leaf_entry_t
)-1);
1297 totallen
-= (sizeof(*entry
)+sizeof(xfs_dir_leaf_entry_t
)-1) +
1298 state
->args
->namelen
;
1302 *namebytesarg
= totallen
;
1306 /*========================================================================
1307 * Routines used for shrinking the Btree.
1308 *========================================================================*/
1311 * Check a leaf block and its neighbors to see if the block should be
1312 * collapsed into one or the other neighbor. Always keep the block
1313 * with the smaller block number.
1314 * If the current block is over 50% full, don't try to join it, return 0.
1315 * If the block is empty, fill in the state structure and return 2.
1316 * If it can be collapsed, fill in the state structure and return 1.
1317 * If nothing can be done, return 0.
1320 xfs_dir_leaf_toosmall(xfs_da_state_t
*state
, int *action
)
1322 xfs_dir_leafblock_t
*leaf
;
1323 xfs_da_state_blk_t
*blk
;
1324 xfs_da_blkinfo_t
*info
;
1325 int count
, bytes
, forward
, error
, retval
, i
;
1330 * Check for the degenerate case of the block being over 50% full.
1331 * If so, it's not worth even looking to see if we might be able
1332 * to coalesce with a sibling.
1334 blk
= &state
->path
.blk
[ state
->path
.active
-1 ];
1335 info
= blk
->bp
->data
;
1336 ASSERT(be16_to_cpu(info
->magic
) == XFS_DIR_LEAF_MAGIC
);
1337 leaf
= (xfs_dir_leafblock_t
*)info
;
1338 count
= be16_to_cpu(leaf
->hdr
.count
);
1339 bytes
= (uint
)sizeof(xfs_dir_leaf_hdr_t
) +
1340 count
* (uint
)sizeof(xfs_dir_leaf_entry_t
) +
1341 count
* ((uint
)sizeof(xfs_dir_leaf_name_t
)-1) +
1342 be16_to_cpu(leaf
->hdr
.namebytes
);
1343 if (bytes
> (state
->blocksize
>> 1)) {
1344 *action
= 0; /* blk over 50%, don't try to join */
1349 * Check for the degenerate case of the block being empty.
1350 * If the block is empty, we'll simply delete it, no need to
1351 * coalesce it with a sibling block. We choose (arbitrarily)
1352 * to merge with the forward block unless it is NULL.
1356 * Make altpath point to the block we want to keep and
1357 * path point to the block we want to drop (this one).
1359 forward
= (info
->forw
!= 0);
1360 memcpy(&state
->altpath
, &state
->path
, sizeof(state
->path
));
1361 error
= xfs_da_path_shift(state
, &state
->altpath
, forward
,
1374 * Examine each sibling block to see if we can coalesce with
1375 * at least 25% free space to spare. We need to figure out
1376 * whether to merge with the forward or the backward block.
1377 * We prefer coalescing with the lower numbered sibling so as
1378 * to shrink a directory over time.
1380 forward
= (be32_to_cpu(info
->forw
) < be32_to_cpu(info
->back
)); /* start with smaller blk num */
1381 for (i
= 0; i
< 2; forward
= !forward
, i
++) {
1383 blkno
= be32_to_cpu(info
->forw
);
1385 blkno
= be32_to_cpu(info
->back
);
1388 error
= xfs_da_read_buf(state
->args
->trans
, state
->args
->dp
,
1395 leaf
= (xfs_dir_leafblock_t
*)info
;
1396 count
= be16_to_cpu(leaf
->hdr
.count
);
1397 bytes
= state
->blocksize
- (state
->blocksize
>>2);
1398 bytes
-= be16_to_cpu(leaf
->hdr
.namebytes
);
1400 ASSERT(be16_to_cpu(leaf
->hdr
.info
.magic
) == XFS_DIR_LEAF_MAGIC
);
1401 count
+= be16_to_cpu(leaf
->hdr
.count
);
1402 bytes
-= be16_to_cpu(leaf
->hdr
.namebytes
);
1403 bytes
-= count
* ((uint
)sizeof(xfs_dir_leaf_name_t
) - 1);
1404 bytes
-= count
* (uint
)sizeof(xfs_dir_leaf_entry_t
);
1405 bytes
-= (uint
)sizeof(xfs_dir_leaf_hdr_t
);
1407 break; /* fits with at least 25% to spare */
1409 xfs_da_brelse(state
->args
->trans
, bp
);
1415 xfs_da_buf_done(bp
);
1418 * Make altpath point to the block we want to keep (the lower
1419 * numbered block) and path point to the block we want to drop.
1421 memcpy(&state
->altpath
, &state
->path
, sizeof(state
->path
));
1422 if (blkno
< blk
->blkno
) {
1423 error
= xfs_da_path_shift(state
, &state
->altpath
, forward
,
1426 error
= xfs_da_path_shift(state
, &state
->path
, forward
,
1440 * Remove a name from the leaf directory structure.
1442 * Return 1 if leaf is less than 37% full, 0 if >= 37% full.
1443 * If two leaves are 37% full, when combined they will leave 25% free.
1446 xfs_dir_leaf_remove(xfs_trans_t
*trans
, xfs_dabuf_t
*bp
, int index
)
1448 xfs_dir_leafblock_t
*leaf
;
1449 xfs_dir_leaf_hdr_t
*hdr
;
1450 xfs_dir_leaf_map_t
*map
;
1451 xfs_dir_leaf_entry_t
*entry
;
1452 xfs_dir_leaf_name_t
*namest
;
1453 int before
, after
, smallest
, entsize
;
1454 int tablesize
, tmp
, i
;
1458 ASSERT(be16_to_cpu(leaf
->hdr
.info
.magic
) == XFS_DIR_LEAF_MAGIC
);
1460 mp
= trans
->t_mountp
;
1461 ASSERT(hdr
->count
&& (be16_to_cpu(hdr
->count
) < (XFS_LBSIZE(mp
)/8)));
1462 ASSERT((index
>= 0) && (index
< be16_to_cpu(hdr
->count
)));
1463 ASSERT(be16_to_cpu(hdr
->firstused
) >=
1464 ((be16_to_cpu(hdr
->count
)*sizeof(*entry
))+sizeof(*hdr
)));
1465 entry
= &leaf
->entries
[index
];
1466 ASSERT(be16_to_cpu(entry
->nameidx
) >= be16_to_cpu(hdr
->firstused
));
1467 ASSERT(be16_to_cpu(entry
->nameidx
) < XFS_LBSIZE(mp
));
1470 * Scan through free region table:
1471 * check for adjacency of free'd entry with an existing one,
1472 * find smallest free region in case we need to replace it,
1473 * adjust any map that borders the entry table,
1475 tablesize
= be16_to_cpu(hdr
->count
) * (uint
)sizeof(xfs_dir_leaf_entry_t
)
1476 + (uint
)sizeof(xfs_dir_leaf_hdr_t
);
1477 map
= &hdr
->freemap
[0];
1478 tmp
= be16_to_cpu(map
->size
);
1479 before
= after
= -1;
1480 smallest
= XFS_DIR_LEAF_MAPSIZE
- 1;
1481 entsize
= XFS_DIR_LEAF_ENTSIZE_BYENTRY(entry
);
1482 for (i
= 0; i
< XFS_DIR_LEAF_MAPSIZE
; map
++, i
++) {
1483 ASSERT(be16_to_cpu(map
->base
) < XFS_LBSIZE(mp
));
1484 ASSERT(be16_to_cpu(map
->size
) < XFS_LBSIZE(mp
));
1485 if (be16_to_cpu(map
->base
) == tablesize
) {
1486 int entry_size
= sizeof(xfs_dir_leaf_entry_t
);
1487 be16_add(&map
->base
, -entry_size
);
1488 be16_add(&map
->size
, entry_size
);
1491 if ((be16_to_cpu(map
->base
) + be16_to_cpu(map
->size
)) ==
1492 be16_to_cpu(entry
->nameidx
)) {
1494 } else if (be16_to_cpu(map
->base
) ==
1495 (be16_to_cpu(entry
->nameidx
) + entsize
)) {
1497 } else if (be16_to_cpu(map
->size
) < tmp
) {
1498 tmp
= be16_to_cpu(map
->size
);
1504 * Coalesce adjacent freemap regions,
1505 * or replace the smallest region.
1507 if ((before
>= 0) || (after
>= 0)) {
1508 if ((before
>= 0) && (after
>= 0)) {
1509 map
= &hdr
->freemap
[before
];
1510 be16_add(&map
->size
, entsize
);
1511 be16_add(&map
->size
, be16_to_cpu(hdr
->freemap
[after
].size
));
1512 hdr
->freemap
[after
].base
= 0;
1513 hdr
->freemap
[after
].size
= 0;
1514 } else if (before
>= 0) {
1515 map
= &hdr
->freemap
[before
];
1516 be16_add(&map
->size
, entsize
);
1518 map
= &hdr
->freemap
[after
];
1519 map
->base
= entry
->nameidx
;
1520 be16_add(&map
->size
, entsize
);
1524 * Replace smallest region (if it is smaller than free'd entry)
1526 map
= &hdr
->freemap
[smallest
];
1527 if (be16_to_cpu(map
->size
) < entsize
) {
1528 map
->base
= entry
->nameidx
;
1529 map
->size
= cpu_to_be16(entsize
);
1534 * Did we remove the first entry?
1536 if (be16_to_cpu(entry
->nameidx
) == be16_to_cpu(hdr
->firstused
))
1542 * Compress the remaining entries and zero out the removed stuff.
1544 namest
= XFS_DIR_LEAF_NAMESTRUCT(leaf
, be16_to_cpu(entry
->nameidx
));
1545 memset((char *)namest
, 0, entsize
);
1546 xfs_da_log_buf(trans
, bp
, XFS_DA_LOGRANGE(leaf
, namest
, entsize
));
1548 be16_add(&hdr
->namebytes
, -(entry
->namelen
));
1549 tmp
= (be16_to_cpu(hdr
->count
) - index
) * (uint
)sizeof(xfs_dir_leaf_entry_t
);
1550 memmove(entry
, entry
+ 1, tmp
);
1551 be16_add(&hdr
->count
, -1);
1552 xfs_da_log_buf(trans
, bp
,
1553 XFS_DA_LOGRANGE(leaf
, entry
, tmp
+ (uint
)sizeof(*entry
)));
1554 entry
= &leaf
->entries
[be16_to_cpu(hdr
->count
)];
1555 memset((char *)entry
, 0, sizeof(xfs_dir_leaf_entry_t
));
1558 * If we removed the first entry, re-find the first used byte
1559 * in the name area. Note that if the entry was the "firstused",
1560 * then we don't have a "hole" in our block resulting from
1561 * removing the name.
1564 tmp
= XFS_LBSIZE(mp
);
1565 entry
= &leaf
->entries
[0];
1566 for (i
= be16_to_cpu(hdr
->count
)-1; i
>= 0; entry
++, i
--) {
1567 ASSERT(be16_to_cpu(entry
->nameidx
) >=
1568 be16_to_cpu(hdr
->firstused
));
1569 ASSERT(be16_to_cpu(entry
->nameidx
) < XFS_LBSIZE(mp
));
1570 if (be16_to_cpu(entry
->nameidx
) < tmp
)
1571 tmp
= be16_to_cpu(entry
->nameidx
);
1573 hdr
->firstused
= cpu_to_be16(tmp
);
1574 if (!hdr
->firstused
)
1575 hdr
->firstused
= cpu_to_be16(tmp
- 1);
1577 hdr
->holes
= 1; /* mark as needing compaction */
1580 xfs_da_log_buf(trans
, bp
, XFS_DA_LOGRANGE(leaf
, hdr
, sizeof(*hdr
)));
1583 * Check if leaf is less than 50% full, caller may want to
1584 * "join" the leaf with a sibling if so.
1586 tmp
= (uint
)sizeof(xfs_dir_leaf_hdr_t
);
1587 tmp
+= be16_to_cpu(leaf
->hdr
.count
) * (uint
)sizeof(xfs_dir_leaf_entry_t
);
1588 tmp
+= be16_to_cpu(leaf
->hdr
.count
) * ((uint
)sizeof(xfs_dir_leaf_name_t
) - 1);
1589 tmp
+= be16_to_cpu(leaf
->hdr
.namebytes
);
1590 if (tmp
< mp
->m_dir_magicpct
)
1591 return 1; /* leaf is < 37% full */
1596 * Move all the directory entries from drop_leaf into save_leaf.
1599 xfs_dir_leaf_unbalance(xfs_da_state_t
*state
, xfs_da_state_blk_t
*drop_blk
,
1600 xfs_da_state_blk_t
*save_blk
)
1602 xfs_dir_leafblock_t
*drop_leaf
, *save_leaf
, *tmp_leaf
;
1603 xfs_dir_leaf_hdr_t
*drop_hdr
, *save_hdr
, *tmp_hdr
;
1608 * Set up environment.
1611 ASSERT(drop_blk
->magic
== XFS_DIR_LEAF_MAGIC
);
1612 ASSERT(save_blk
->magic
== XFS_DIR_LEAF_MAGIC
);
1613 drop_leaf
= drop_blk
->bp
->data
;
1614 save_leaf
= save_blk
->bp
->data
;
1615 ASSERT(be16_to_cpu(drop_leaf
->hdr
.info
.magic
) == XFS_DIR_LEAF_MAGIC
);
1616 ASSERT(be16_to_cpu(save_leaf
->hdr
.info
.magic
) == XFS_DIR_LEAF_MAGIC
);
1617 drop_hdr
= &drop_leaf
->hdr
;
1618 save_hdr
= &save_leaf
->hdr
;
1621 * Save last hashval from dying block for later Btree fixup.
1623 drop_blk
->hashval
= be32_to_cpu(drop_leaf
->entries
[
1624 be16_to_cpu(drop_leaf
->hdr
.count
)-1].hashval
);
1627 * Check if we need a temp buffer, or can we do it in place.
1628 * Note that we don't check "leaf" for holes because we will
1629 * always be dropping it, toosmall() decided that for us already.
1631 if (save_hdr
->holes
== 0) {
1633 * dest leaf has no holes, so we add there. May need
1634 * to make some room in the entry array.
1636 if (xfs_dir_leaf_order(save_blk
->bp
, drop_blk
->bp
)) {
1637 xfs_dir_leaf_moveents(drop_leaf
, 0, save_leaf
, 0,
1638 be16_to_cpu(drop_hdr
->count
), mp
);
1640 xfs_dir_leaf_moveents(drop_leaf
, 0,
1641 save_leaf
, be16_to_cpu(save_hdr
->count
),
1642 be16_to_cpu(drop_hdr
->count
), mp
);
1646 * Destination has holes, so we make a temporary copy
1647 * of the leaf and add them both to that.
1649 tmpbuffer
= kmem_alloc(state
->blocksize
, KM_SLEEP
);
1650 ASSERT(tmpbuffer
!= NULL
);
1651 memset(tmpbuffer
, 0, state
->blocksize
);
1652 tmp_leaf
= (xfs_dir_leafblock_t
*)tmpbuffer
;
1653 tmp_hdr
= &tmp_leaf
->hdr
;
1654 tmp_hdr
->info
= save_hdr
->info
; /* struct copy */
1656 tmp_hdr
->firstused
= cpu_to_be16(state
->blocksize
);
1657 if (!tmp_hdr
->firstused
)
1658 tmp_hdr
->firstused
= cpu_to_be16(state
->blocksize
- 1);
1659 tmp_hdr
->namebytes
= 0;
1660 if (xfs_dir_leaf_order(save_blk
->bp
, drop_blk
->bp
)) {
1661 xfs_dir_leaf_moveents(drop_leaf
, 0, tmp_leaf
, 0,
1662 be16_to_cpu(drop_hdr
->count
), mp
);
1663 xfs_dir_leaf_moveents(save_leaf
, 0,
1664 tmp_leaf
, be16_to_cpu(tmp_leaf
->hdr
.count
),
1665 be16_to_cpu(save_hdr
->count
), mp
);
1667 xfs_dir_leaf_moveents(save_leaf
, 0, tmp_leaf
, 0,
1668 be16_to_cpu(save_hdr
->count
), mp
);
1669 xfs_dir_leaf_moveents(drop_leaf
, 0,
1670 tmp_leaf
, be16_to_cpu(tmp_leaf
->hdr
.count
),
1671 be16_to_cpu(drop_hdr
->count
), mp
);
1673 memcpy(save_leaf
, tmp_leaf
, state
->blocksize
);
1674 kmem_free(tmpbuffer
, state
->blocksize
);
1677 xfs_da_log_buf(state
->args
->trans
, save_blk
->bp
, 0,
1678 state
->blocksize
- 1);
1681 * Copy out last hashval in each block for B-tree code.
1683 save_blk
->hashval
= be32_to_cpu(save_leaf
->entries
[
1684 be16_to_cpu(save_leaf
->hdr
.count
)-1].hashval
);
1687 /*========================================================================
1688 * Routines used for finding things in the Btree.
1689 *========================================================================*/
1692 * Look up a name in a leaf directory structure.
1693 * This is the internal routine, it uses the caller's buffer.
1695 * Note that duplicate keys are allowed, but only check within the
1696 * current leaf node. The Btree code must check in adjacent leaf nodes.
1698 * Return in *index the index into the entry[] array of either the found
1699 * entry, or where the entry should have been (insert before that entry).
1701 * Don't change the args->inumber unless we find the filename.
1704 xfs_dir_leaf_lookup_int(xfs_dabuf_t
*bp
, xfs_da_args_t
*args
, int *index
)
1706 xfs_dir_leafblock_t
*leaf
;
1707 xfs_dir_leaf_entry_t
*entry
;
1708 xfs_dir_leaf_name_t
*namest
;
1710 xfs_dahash_t hashval
;
1713 ASSERT(be16_to_cpu(leaf
->hdr
.info
.magic
) == XFS_DIR_LEAF_MAGIC
);
1714 ASSERT(be16_to_cpu(leaf
->hdr
.count
) < (XFS_LBSIZE(args
->dp
->i_mount
)/8));
1717 * Binary search. (note: small blocks will skip this loop)
1719 hashval
= args
->hashval
;
1720 probe
= span
= be16_to_cpu(leaf
->hdr
.count
) / 2;
1721 for (entry
= &leaf
->entries
[probe
]; span
> 4;
1722 entry
= &leaf
->entries
[probe
]) {
1724 if (be32_to_cpu(entry
->hashval
) < hashval
)
1726 else if (be32_to_cpu(entry
->hashval
) > hashval
)
1731 ASSERT((probe
>= 0) && \
1732 ((!leaf
->hdr
.count
) || (probe
< be16_to_cpu(leaf
->hdr
.count
))));
1733 ASSERT((span
<= 4) || (be32_to_cpu(entry
->hashval
) == hashval
));
1736 * Since we may have duplicate hashval's, find the first matching
1737 * hashval in the leaf.
1739 while ((probe
> 0) && (be32_to_cpu(entry
->hashval
) >= hashval
)) {
1743 while ((probe
< be16_to_cpu(leaf
->hdr
.count
)) &&
1744 (be32_to_cpu(entry
->hashval
) < hashval
)) {
1748 if ((probe
== be16_to_cpu(leaf
->hdr
.count
)) ||
1749 (be32_to_cpu(entry
->hashval
) != hashval
)) {
1751 ASSERT(args
->oknoent
);
1752 return XFS_ERROR(ENOENT
);
1756 * Duplicate keys may be present, so search all of them for a match.
1758 while ((probe
< be16_to_cpu(leaf
->hdr
.count
)) &&
1759 (be32_to_cpu(entry
->hashval
) == hashval
)) {
1760 namest
= XFS_DIR_LEAF_NAMESTRUCT(leaf
, be16_to_cpu(entry
->nameidx
));
1761 if (entry
->namelen
== args
->namelen
&&
1762 namest
->name
[0] == args
->name
[0] &&
1763 memcmp(args
->name
, namest
->name
, args
->namelen
) == 0) {
1764 XFS_DIR_SF_GET_DIRINO(&namest
->inumber
, &args
->inumber
);
1766 return XFS_ERROR(EEXIST
);
1772 ASSERT(probe
== be16_to_cpu(leaf
->hdr
.count
) || args
->oknoent
);
1773 return XFS_ERROR(ENOENT
);
1776 /*========================================================================
1778 *========================================================================*/
1781 * Move the indicated entries from one leaf to another.
1782 * NOTE: this routine modifies both source and destination leaves.
1786 xfs_dir_leaf_moveents(xfs_dir_leafblock_t
*leaf_s
, int start_s
,
1787 xfs_dir_leafblock_t
*leaf_d
, int start_d
,
1788 int count
, xfs_mount_t
*mp
)
1790 xfs_dir_leaf_hdr_t
*hdr_s
, *hdr_d
;
1791 xfs_dir_leaf_entry_t
*entry_s
, *entry_d
;
1795 * Check for nothing to do.
1801 * Set up environment.
1803 ASSERT(be16_to_cpu(leaf_s
->hdr
.info
.magic
) == XFS_DIR_LEAF_MAGIC
);
1804 ASSERT(be16_to_cpu(leaf_d
->hdr
.info
.magic
) == XFS_DIR_LEAF_MAGIC
);
1805 hdr_s
= &leaf_s
->hdr
;
1806 hdr_d
= &leaf_d
->hdr
;
1807 ASSERT(hdr_s
->count
&& (be16_to_cpu(hdr_s
->count
) < (XFS_LBSIZE(mp
)/8)));
1808 ASSERT(be16_to_cpu(hdr_s
->firstused
) >=
1809 ((be16_to_cpu(hdr_s
->count
)*sizeof(*entry_s
))+sizeof(*hdr_s
)));
1810 ASSERT(be16_to_cpu(hdr_d
->count
) < (XFS_LBSIZE(mp
)/8));
1811 ASSERT(be16_to_cpu(hdr_d
->firstused
) >=
1812 ((be16_to_cpu(hdr_d
->count
)*sizeof(*entry_d
))+sizeof(*hdr_d
)));
1814 ASSERT(start_s
< be16_to_cpu(hdr_s
->count
));
1815 ASSERT(start_d
<= be16_to_cpu(hdr_d
->count
));
1816 ASSERT(count
<= be16_to_cpu(hdr_s
->count
));
1819 * Move the entries in the destination leaf up to make a hole?
1821 if (start_d
< be16_to_cpu(hdr_d
->count
)) {
1822 tmp
= be16_to_cpu(hdr_d
->count
) - start_d
;
1823 tmp
*= (uint
)sizeof(xfs_dir_leaf_entry_t
);
1824 entry_s
= &leaf_d
->entries
[start_d
];
1825 entry_d
= &leaf_d
->entries
[start_d
+ count
];
1826 memcpy(entry_d
, entry_s
, tmp
);
1830 * Copy all entry's in the same (sorted) order,
1831 * but allocate filenames packed and in sequence.
1833 entry_s
= &leaf_s
->entries
[start_s
];
1834 entry_d
= &leaf_d
->entries
[start_d
];
1835 for (i
= 0; i
< count
; entry_s
++, entry_d
++, i
++) {
1836 ASSERT(be16_to_cpu(entry_s
->nameidx
) >=
1837 be16_to_cpu(hdr_s
->firstused
));
1838 tmp
= XFS_DIR_LEAF_ENTSIZE_BYENTRY(entry_s
);
1839 be16_add(&hdr_d
->firstused
, -(tmp
));
1840 entry_d
->hashval
= entry_s
->hashval
;
1841 entry_d
->nameidx
= hdr_d
->firstused
;
1842 entry_d
->namelen
= entry_s
->namelen
;
1843 ASSERT(be16_to_cpu(entry_d
->nameidx
) + tmp
<= XFS_LBSIZE(mp
));
1844 memcpy(XFS_DIR_LEAF_NAMESTRUCT(leaf_d
, be16_to_cpu(entry_d
->nameidx
)),
1845 XFS_DIR_LEAF_NAMESTRUCT(leaf_s
, be16_to_cpu(entry_s
->nameidx
)), tmp
);
1846 ASSERT(be16_to_cpu(entry_s
->nameidx
) + tmp
<= XFS_LBSIZE(mp
));
1847 memset((char *)XFS_DIR_LEAF_NAMESTRUCT(leaf_s
,
1848 be16_to_cpu(entry_s
->nameidx
)), 0, tmp
);
1849 be16_add(&hdr_s
->namebytes
, -(entry_d
->namelen
));
1850 be16_add(&hdr_d
->namebytes
, entry_d
->namelen
);
1851 be16_add(&hdr_s
->count
, -1);
1852 be16_add(&hdr_d
->count
, +1);
1853 tmp
= be16_to_cpu(hdr_d
->count
) * (uint
)sizeof(xfs_dir_leaf_entry_t
)
1854 + (uint
)sizeof(xfs_dir_leaf_hdr_t
);
1855 ASSERT(be16_to_cpu(hdr_d
->firstused
) >= tmp
);
1860 * Zero out the entries we just copied.
1862 if (start_s
== be16_to_cpu(hdr_s
->count
)) {
1863 tmp
= count
* (uint
)sizeof(xfs_dir_leaf_entry_t
);
1864 entry_s
= &leaf_s
->entries
[start_s
];
1865 ASSERT((char *)entry_s
+ tmp
<= (char *)leaf_s
+ XFS_LBSIZE(mp
));
1866 memset((char *)entry_s
, 0, tmp
);
1869 * Move the remaining entries down to fill the hole,
1870 * then zero the entries at the top.
1872 tmp
= be16_to_cpu(hdr_s
->count
) - count
;
1873 tmp
*= (uint
)sizeof(xfs_dir_leaf_entry_t
);
1874 entry_s
= &leaf_s
->entries
[start_s
+ count
];
1875 entry_d
= &leaf_s
->entries
[start_s
];
1876 memcpy(entry_d
, entry_s
, tmp
);
1878 tmp
= count
* (uint
)sizeof(xfs_dir_leaf_entry_t
);
1879 entry_s
= &leaf_s
->entries
[be16_to_cpu(hdr_s
->count
)];
1880 ASSERT((char *)entry_s
+ tmp
<= (char *)leaf_s
+ XFS_LBSIZE(mp
));
1881 memset((char *)entry_s
, 0, tmp
);
1885 * Fill in the freemap information
1887 hdr_d
->freemap
[0].base
= cpu_to_be16(sizeof(xfs_dir_leaf_hdr_t
) +
1888 be16_to_cpu(hdr_d
->count
) * sizeof(xfs_dir_leaf_entry_t
));
1889 hdr_d
->freemap
[0].size
= cpu_to_be16(be16_to_cpu(hdr_d
->firstused
) -
1890 be16_to_cpu(hdr_d
->freemap
[0].base
));
1891 hdr_d
->freemap
[1].base
= 0;
1892 hdr_d
->freemap
[1].size
= 0;
1893 hdr_d
->freemap
[2].base
= 0;
1894 hdr_d
->freemap
[2].size
= 0;
1895 hdr_s
->holes
= 1; /* leaf may not be compact */
1899 * Compare two leaf blocks "order".
1902 xfs_dir_leaf_order(xfs_dabuf_t
*leaf1_bp
, xfs_dabuf_t
*leaf2_bp
)
1904 xfs_dir_leafblock_t
*leaf1
, *leaf2
;
1906 leaf1
= leaf1_bp
->data
;
1907 leaf2
= leaf2_bp
->data
;
1908 ASSERT((be16_to_cpu(leaf1
->hdr
.info
.magic
) == XFS_DIR_LEAF_MAGIC
) &&
1909 (be16_to_cpu(leaf2
->hdr
.info
.magic
) == XFS_DIR_LEAF_MAGIC
));
1910 if (leaf1
->hdr
.count
&& leaf2
->hdr
.count
&&
1911 ((be32_to_cpu(leaf2
->entries
[0].hashval
) <
1912 be32_to_cpu(leaf1
->entries
[0 ].hashval
)) ||
1913 (be32_to_cpu(leaf2
->entries
[
1914 be16_to_cpu(leaf2
->hdr
.count
)-1].hashval
) <
1915 be32_to_cpu(leaf1
->entries
[
1916 be16_to_cpu(leaf1
->hdr
.count
)-1].hashval
)))) {
1923 * Pick up the last hashvalue from a leaf block.
1926 xfs_dir_leaf_lasthash(xfs_dabuf_t
*bp
, int *count
)
1928 xfs_dir_leafblock_t
*leaf
;
1931 ASSERT(be16_to_cpu(leaf
->hdr
.info
.magic
) == XFS_DIR_LEAF_MAGIC
);
1933 *count
= be16_to_cpu(leaf
->hdr
.count
);
1934 if (!leaf
->hdr
.count
)
1936 return be32_to_cpu(leaf
->entries
[be16_to_cpu(leaf
->hdr
.count
)-1].hashval
);
1940 * Copy out directory entries for getdents(), for leaf directories.
1943 xfs_dir_leaf_getdents_int(
1953 xfs_dir_leafblock_t
*leaf
;
1954 xfs_dir_leaf_entry_t
*entry
;
1955 xfs_dir_leaf_name_t
*namest
;
1956 int entno
, want_entno
, i
, nextentno
;
1958 xfs_dahash_t cookhash
;
1959 xfs_dahash_t nexthash
= 0;
1960 #if (BITS_PER_LONG == 32)
1961 xfs_dahash_t lasthash
= XFS_DA_MAXHASH
;
1963 xfs_dir_put_args_t p
;
1967 if (be16_to_cpu(leaf
->hdr
.info
.magic
) != XFS_DIR_LEAF_MAGIC
) {
1969 return XFS_ERROR(ENOENT
); /* XXX wrong code */
1972 want_entno
= XFS_DA_COOKIE_ENTRY(mp
, uio
->uio_offset
);
1974 cookhash
= XFS_DA_COOKIE_HASH(mp
, uio
->uio_offset
);
1976 xfs_dir_trace_g_dul("leaf: start", dp
, uio
, leaf
);
1979 * Re-find our place.
1981 for (i
= entno
= 0, entry
= &leaf
->entries
[0];
1982 i
< be16_to_cpu(leaf
->hdr
.count
); entry
++, i
++) {
1984 namest
= XFS_DIR_LEAF_NAMESTRUCT(leaf
,
1985 be16_to_cpu(entry
->nameidx
));
1988 ((char *)namest
< (char *)leaf
) ||
1989 ((char *)namest
>= (char *)leaf
+ XFS_LBSIZE(mp
)))) {
1990 XFS_CORRUPTION_ERROR("xfs_dir_leaf_getdents_int(1)",
1991 XFS_ERRLEVEL_LOW
, mp
, leaf
);
1992 xfs_dir_trace_g_du("leaf: corrupted", dp
, uio
);
1993 return XFS_ERROR(EFSCORRUPTED
);
1995 if (be32_to_cpu(entry
->hashval
) >= cookhash
) {
1996 if (entno
< want_entno
&&
1997 be32_to_cpu(entry
->hashval
) == cookhash
) {
1999 * Trying to get to a particular offset in a
2000 * run of equal-hashval entries.
2003 } else if (want_entno
> 0 && entno
== want_entno
&&
2004 be32_to_cpu(entry
->hashval
) == cookhash
) {
2013 if (i
== be16_to_cpu(leaf
->hdr
.count
)) {
2014 xfs_dir_trace_g_du("leaf: hash not found", dp
, uio
);
2015 if (!leaf
->hdr
.info
.forw
)
2017 XFS_DA_MAKE_COOKIE(mp
, 0, 0, XFS_DA_MAXHASH
);
2019 * Don't set uio_offset if there's another block:
2020 * the node code will be setting uio_offset anyway.
2025 xfs_dir_trace_g_due("leaf: hash found", dp
, uio
, entry
);
2032 * We're synchronized, start copying entries out to the user.
2034 for (; entno
>= 0 && i
< be16_to_cpu(leaf
->hdr
.count
);
2035 entry
++, i
++, (entno
= nextentno
)) {
2036 int lastresid
=0, retval
;
2037 xfs_dircook_t lastoffset
;
2038 xfs_dahash_t thishash
;
2041 * Check for a damaged directory leaf block and pick up
2042 * the inode number from this entry.
2044 namest
= XFS_DIR_LEAF_NAMESTRUCT(leaf
,
2045 be16_to_cpu(entry
->nameidx
));
2048 ((char *)namest
< (char *)leaf
) ||
2049 ((char *)namest
>= (char *)leaf
+ XFS_LBSIZE(mp
)))) {
2050 XFS_CORRUPTION_ERROR("xfs_dir_leaf_getdents_int(2)",
2051 XFS_ERRLEVEL_LOW
, mp
, leaf
);
2052 xfs_dir_trace_g_du("leaf: corrupted", dp
, uio
);
2053 return XFS_ERROR(EFSCORRUPTED
);
2056 xfs_dir_trace_g_duc("leaf: middle cookie ",
2059 if (i
< (be16_to_cpu(leaf
->hdr
.count
) - 1)) {
2060 nexthash
= be32_to_cpu(entry
[1].hashval
);
2062 if (nexthash
== be32_to_cpu(entry
->hashval
))
2063 nextentno
= entno
+ 1;
2066 XFS_PUT_COOKIE(p
.cook
, mp
, bno
, nextentno
, nexthash
);
2067 xfs_dir_trace_g_duc("leaf: middle cookie ",
2070 } else if ((thishash
= be32_to_cpu(leaf
->hdr
.info
.forw
))) {
2072 xfs_dir_leafblock_t
*leaf2
;
2074 ASSERT(nextda
!= -1);
2076 retval
= xfs_da_read_buf(dp
->i_transp
, dp
, thishash
,
2077 nextda
, &bp2
, XFS_DATA_FORK
);
2081 ASSERT(bp2
!= NULL
);
2086 (be16_to_cpu(leaf2
->hdr
.info
.magic
)
2087 != XFS_DIR_LEAF_MAGIC
)
2088 || (be32_to_cpu(leaf2
->hdr
.info
.back
)
2089 != bno
))) { /* GROT */
2090 XFS_CORRUPTION_ERROR("xfs_dir_leaf_getdents_int(3)",
2091 XFS_ERRLEVEL_LOW
, mp
,
2093 xfs_da_brelse(dp
->i_transp
, bp2
);
2095 return XFS_ERROR(EFSCORRUPTED
);
2098 nexthash
= be32_to_cpu(leaf2
->entries
[0].hashval
);
2100 XFS_PUT_COOKIE(p
.cook
, mp
, thishash
, 0, nexthash
);
2101 xfs_da_brelse(dp
->i_transp
, bp2
);
2102 xfs_dir_trace_g_duc("leaf: next blk cookie",
2106 XFS_PUT_COOKIE(p
.cook
, mp
, 0, 0, XFS_DA_MAXHASH
);
2110 * Save off the cookie so we can fall back should the
2111 * 'put' into the outgoing buffer fails. To handle a run
2112 * of equal-hashvals, the off_t structure on 64bit
2113 * builds has entno built into the cookie to ID the
2114 * entry. On 32bit builds, we only have space for the
2115 * hashval so we can't ID specific entries within a group
2116 * of same hashval entries. For this, lastoffset is set
2117 * to the first in the run of equal hashvals so we don't
2118 * include any entries unless we can include all entries
2119 * that share the same hashval. Hopefully the buffer
2120 * provided is big enough to handle it (see pv763517).
2122 thishash
= be32_to_cpu(entry
->hashval
);
2123 #if (BITS_PER_LONG == 32)
2124 if (thishash
!= lasthash
) {
2125 XFS_PUT_COOKIE(lastoffset
, mp
, bno
, entno
, thishash
);
2126 lastresid
= uio
->uio_resid
;
2127 lasthash
= thishash
;
2129 xfs_dir_trace_g_duc("leaf: DUP COOKIES, skipped",
2133 XFS_PUT_COOKIE(lastoffset
, mp
, bno
, entno
, thishash
);
2134 lastresid
= uio
->uio_resid
;
2135 #endif /* BITS_PER_LONG == 32 */
2138 * Put the current entry into the outgoing buffer. If we fail
2139 * then restore the UIO to the first entry in the current
2140 * run of equal-hashval entries (probably one 1 entry long).
2142 p
.ino
= XFS_GET_DIR_INO8(namest
->inumber
);
2144 p
.ino
+= mp
->m_inoadd
;
2146 p
.name
= (char *)namest
->name
;
2147 p
.namelen
= entry
->namelen
;
2152 uio
->uio_offset
= lastoffset
.o
;
2153 uio
->uio_resid
= lastresid
;
2157 xfs_dir_trace_g_du("leaf: E-O-B", dp
, uio
);
2163 uio
->uio_offset
= p
.cook
.o
;
2167 xfs_dir_trace_g_du("leaf: E-O-F", dp
, uio
);
2173 * Format a dirent64 structure and copy it out the the user's buffer.
2176 xfs_dir_put_dirent64_direct(xfs_dir_put_args_t
*pa
)
2179 int reclen
, namelen
;
2183 namelen
= pa
->namelen
;
2184 reclen
= DIRENTSIZE(namelen
);
2186 if (reclen
> uio
->uio_resid
) {
2190 iovp
= uio
->uio_iov
;
2191 idbp
= (xfs_dirent_t
*)iovp
->iov_base
;
2192 iovp
->iov_base
= (char *)idbp
+ reclen
;
2193 iovp
->iov_len
-= reclen
;
2194 uio
->uio_resid
-= reclen
;
2195 idbp
->d_reclen
= reclen
;
2196 idbp
->d_ino
= pa
->ino
;
2197 idbp
->d_off
= pa
->cook
.o
;
2198 idbp
->d_name
[namelen
] = '\0';
2200 memcpy(idbp
->d_name
, pa
->name
, namelen
);
2205 * Format a dirent64 structure and copy it out the the user's buffer.
2208 xfs_dir_put_dirent64_uio(xfs_dir_put_args_t
*pa
)
2210 int retval
, reclen
, namelen
;
2214 namelen
= pa
->namelen
;
2215 reclen
= DIRENTSIZE(namelen
);
2217 if (reclen
> uio
->uio_resid
) {
2222 idbp
->d_reclen
= reclen
;
2223 idbp
->d_ino
= pa
->ino
;
2224 idbp
->d_off
= pa
->cook
.o
;
2225 idbp
->d_name
[namelen
] = '\0';
2226 memcpy(idbp
->d_name
, pa
->name
, namelen
);
2227 retval
= uio_read((caddr_t
)idbp
, reclen
, uio
);
2228 pa
->done
= (retval
== 0);