// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_btree.h"
#include "xfs_bit.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_icache.h"
#include "xfs_rmap.h"
#include "xfs_log.h"
#include "xfs_trans_priv.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"
#include "scrub/trace.h"
/*
 * Set us up to scrub inode btrees.
 * If we detect a discrepancy between the inobt and the inode,
 * try again after forcing logged inode cores out to disk.
 */
int
xfs_scrub_setup_ag_iallocbt(
	struct xfs_scrub_context	*sc,
	struct xfs_inode		*ip)
{
	return xfs_scrub_setup_ag_btree(sc, ip, sc->try_harder);
}
/* Inode btree scrubber. */

/*
 * If we're checking the finobt, cross-reference with the inobt.
 * Otherwise we're checking the inobt; if there is a finobt, make sure
 * we have a record or not depending on freecount.
 */
static inline void
xfs_scrub_iallocbt_chunk_xref_other(
	struct xfs_scrub_context	*sc,
	struct xfs_inobt_rec_incore	*irec,
	xfs_agino_t			agino)
{
	struct xfs_btree_cur		**pcur;
	bool				has_irec;
	int				error;

	if (sc->sm->sm_type == XFS_SCRUB_TYPE_FINOBT)
		pcur = &sc->sa.ino_cur;
	else
		pcur = &sc->sa.fino_cur;
	if (!(*pcur))
		return;
	error = xfs_ialloc_has_inode_record(*pcur, agino, agino, &has_irec);
	if (!xfs_scrub_should_check_xref(sc, &error, pcur))
		return;
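	/*
	 * A chunk with free inodes should show up in both inode btrees,
	 * while a fully allocated chunk should only appear in the inobt;
	 * flag a cross-referencing problem if the other btree disagrees
	 * with this record's freecount.
	 */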
	if ((irec->ir_freecount > 0 && !has_irec) ||
	    (irec->ir_freecount == 0 && has_irec))
		xfs_scrub_btree_xref_set_corrupt(sc, *pcur, 0);
}
/* Cross-reference with the other btrees. */
STATIC void
xfs_scrub_iallocbt_chunk_xref(
	struct xfs_scrub_context	*sc,
	struct xfs_inobt_rec_incore	*irec,
	xfs_agino_t			agino,
	xfs_agblock_t			agbno,
	xfs_extlen_t			len)
{
	struct xfs_owner_info		oinfo;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	xfs_scrub_xref_is_used_space(sc, agbno, len);
	xfs_scrub_iallocbt_chunk_xref_other(sc, irec, agino);
	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INODES);
	xfs_scrub_xref_is_owned_by(sc, agbno, len, &oinfo);
	xfs_scrub_xref_is_not_shared(sc, agbno, len);
}
/* Is this chunk worth checking? */
STATIC bool
xfs_scrub_iallocbt_chunk(
	struct xfs_scrub_btree		*bs,
	struct xfs_inobt_rec_incore	*irec,
	xfs_agino_t			agino,
	xfs_extlen_t			len)
{
	struct xfs_mount		*mp = bs->cur->bc_mp;
	xfs_agnumber_t			agno = bs->cur->bc_private.a.agno;
	xfs_agblock_t			bno;

	bno = XFS_AGINO_TO_AGBNO(mp, agino);
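	/*
	 * The chunk must map to blocks that sit entirely inside this AG;
	 * an overflowing bno + len also counts as corruption.
	 */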
	if (bno + len <= bno ||
	    !xfs_verify_agbno(mp, agno, bno) ||
	    !xfs_verify_agbno(mp, agno, bno + len - 1))
		xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);

	xfs_scrub_iallocbt_chunk_xref(bs->sc, irec, agino, bno, len);

	return true;
}
/* Count the number of free inodes. */
static unsigned int
xfs_scrub_iallocbt_freecount(
	xfs_inofree_t			freemask)
{
	BUILD_BUG_ON(sizeof(freemask) != sizeof(__u64));
	return hweight64(freemask);
}
/* Check a particular inode with ir_free. */
STATIC int
xfs_scrub_iallocbt_check_cluster_freemask(
	struct xfs_scrub_btree		*bs,
	xfs_ino_t			fsino,
	xfs_agino_t			chunkino,
	xfs_agino_t			clusterino,
	struct xfs_inobt_rec_incore	*irec,
	struct xfs_buf			*bp)
{
	struct xfs_dinode		*dip;
	struct xfs_mount		*mp = bs->cur->bc_mp;
	bool				inode_is_free = false;
	bool				freemask_ok;
	bool				inuse;
	int				error = 0;

	if (xfs_scrub_should_terminate(bs->sc, &error))
		return error;

	dip = xfs_buf_offset(bp, clusterino * mp->m_sb.sb_inodesize);
	if (be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC ||
	    (dip->di_version >= 3 &&
	     be64_to_cpu(dip->di_ino) != fsino + clusterino)) {
		xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
		goto out;
	}

	if (irec->ir_free & XFS_INOBT_MASK(chunkino + clusterino))
		inode_is_free = true;
	error = xfs_icache_inode_is_allocated(mp, bs->cur->bc_tp,
			fsino + clusterino, &inuse);
	if (error == -ENODATA) {
		/* Not cached, just read the disk buffer */
		freemask_ok = inode_is_free ^ !!(dip->di_mode);
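		/*
		 * The on-disk copy can be stale if the inode core is still
		 * sitting in the log.  Rather than declare corruption from
		 * a possibly stale buffer, bail out so the scrub can be
		 * retried with try_harder set, which (per the setup comment
		 * above) forces logged inode cores out to disk first.
		 */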
		if (!bs->sc->try_harder && !freemask_ok)
			return -EDEADLOCK;
	} else if (error < 0) {
		/*
		 * Inode is only half assembled, or there was an IO error,
		 * or the verifier failed, so don't bother trying to check.
		 * The inode scrubber can deal with this.
		 */
		goto out;
	} else {
		/* Inode is all there. */
		freemask_ok = inode_is_free ^ inuse;
	}
	if (!freemask_ok)
		xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
out:
	return 0;
}
/* Make sure the free mask is consistent with what the inodes think. */
STATIC int
xfs_scrub_iallocbt_check_freemask(
	struct xfs_scrub_btree		*bs,
	struct xfs_inobt_rec_incore	*irec)
{
	struct xfs_owner_info		oinfo;
	struct xfs_imap			imap;
	struct xfs_mount		*mp = bs->cur->bc_mp;
	struct xfs_dinode		*dip;
	struct xfs_buf			*bp;
	xfs_ino_t			fsino;
	xfs_agino_t			nr_inodes;
	xfs_agino_t			agino;
	xfs_agino_t			chunkino;
	xfs_agino_t			clusterino;
	xfs_agblock_t			agbno;
	int				blks_per_cluster;
	uint16_t			holemask;
	uint16_t			ir_holemask;
	int				error = 0;

	/* Make sure the freemask matches the inode records. */
	blks_per_cluster = xfs_icluster_size_fsb(mp);
	nr_inodes = XFS_OFFBNO_TO_AGINO(mp, blks_per_cluster, 0);
	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INODES);

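	/* Walk the inode chunk one cluster at a time. */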
	for (agino = irec->ir_startino;
	     agino < irec->ir_startino + XFS_INODES_PER_CHUNK;
	     agino += blks_per_cluster * mp->m_sb.sb_inopblock) {
		fsino = XFS_AGINO_TO_INO(mp, bs->cur->bc_private.a.agno, agino);
		chunkino = agino - irec->ir_startino;
		agbno = XFS_AGINO_TO_AGBNO(mp, agino);

		/* Compute the holemask mask for this cluster. */
		for (clusterino = 0, holemask = 0; clusterino < nr_inodes;
		     clusterino += XFS_INODES_PER_HOLEMASK_BIT)
			holemask |= XFS_INOBT_MASK((chunkino + clusterino) /
					XFS_INODES_PER_HOLEMASK_BIT);

		/* The whole cluster must be a hole or not a hole. */
		ir_holemask = (irec->ir_holemask & holemask);
		if (ir_holemask != holemask && ir_holemask != 0) {
			xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
			continue;
		}

		/* If any part of this is a hole, skip it. */
		if (ir_holemask) {
			xfs_scrub_xref_is_not_owned_by(bs->sc, agbno,
					blks_per_cluster, &oinfo);
			continue;
		}

		xfs_scrub_xref_is_owned_by(bs->sc, agbno, blks_per_cluster,
				&oinfo);

		/* Grab the inode cluster buffer. */
		imap.im_blkno = XFS_AGB_TO_DADDR(mp, bs->cur->bc_private.a.agno,
				agbno);
		imap.im_len = XFS_FSB_TO_BB(mp, blks_per_cluster);
		imap.im_boffset = 0;

		error = xfs_imap_to_bp(mp, bs->cur->bc_tp, &imap,
				&dip, &bp, 0, 0);
		if (!xfs_scrub_btree_xref_process_error(bs->sc, bs->cur, 0,
				&error))
			continue;

		/* Which inodes are free? */
		for (clusterino = 0; clusterino < nr_inodes; clusterino++) {
			error = xfs_scrub_iallocbt_check_cluster_freemask(bs,
					fsino, chunkino, clusterino, irec, bp);
			if (error) {
				xfs_trans_brelse(bs->cur->bc_tp, bp);
				goto out;
			}
		}

		xfs_trans_brelse(bs->cur->bc_tp, bp);
	}

out:
	return error;
}
/* Scrub an inobt/finobt record. */
STATIC int
xfs_scrub_iallocbt_rec(
	struct xfs_scrub_btree		*bs,
	union xfs_btree_rec		*rec)
{
	struct xfs_mount		*mp = bs->cur->bc_mp;
	xfs_filblks_t			*inode_blocks = bs->private;
	struct xfs_inobt_rec_incore	irec;
	uint64_t			holes;
	xfs_agnumber_t			agno = bs->cur->bc_private.a.agno;
	xfs_agino_t			agino;
	xfs_agblock_t			agbno;
	xfs_extlen_t			len;
	int				holecount;
	int				i;
	int				error = 0;
	unsigned int			real_freecount;
	uint16_t			holemask;

	xfs_inobt_btrec_to_irec(mp, rec, &irec);

	if (irec.ir_count > XFS_INODES_PER_CHUNK ||
	    irec.ir_freecount > XFS_INODES_PER_CHUNK)
		xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);

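	/*
	 * ir_free covers all 64 slots in the chunk, so inodes that were
	 * never physically allocated (sparse holes) count as free here too.
	 */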
	real_freecount = irec.ir_freecount +
			(XFS_INODES_PER_CHUNK - irec.ir_count);
	if (real_freecount != xfs_scrub_iallocbt_freecount(irec.ir_free))
		xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);

	agino = irec.ir_startino;
	/* Record has to be properly aligned within the AG. */
	if (!xfs_verify_agino(mp, agno, agino) ||
	    !xfs_verify_agino(mp, agno, agino + XFS_INODES_PER_CHUNK - 1)) {
		xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
		goto out;
	}

	/* Make sure this record is aligned to cluster and inoalignment size. */
	agbno = XFS_AGINO_TO_AGBNO(mp, irec.ir_startino);
	if ((agbno & (xfs_ialloc_cluster_alignment(mp) - 1)) ||
	    (agbno & (xfs_icluster_size_fsb(mp) - 1)))
		xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);

	*inode_blocks += XFS_B_TO_FSB(mp,
			irec.ir_count * mp->m_sb.sb_inodesize);

	/* Handle non-sparse inodes */
	if (!xfs_inobt_issparse(irec.ir_holemask)) {
		len = XFS_B_TO_FSB(mp,
				XFS_INODES_PER_CHUNK * mp->m_sb.sb_inodesize);
		if (irec.ir_count != XFS_INODES_PER_CHUNK)
			xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);

		if (!xfs_scrub_iallocbt_chunk(bs, &irec, agino, len))
			goto out;
		goto check_freemask;
	}

	/* Check each chunk of a sparse inode cluster. */
	holemask = irec.ir_holemask;
	holecount = 0;
	len = XFS_B_TO_FSB(mp,
			XFS_INODES_PER_HOLEMASK_BIT * mp->m_sb.sb_inodesize);
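	/*
	 * Every hole in the chunk must also be marked free in ir_free,
	 * and the record can't claim more free inodes than it has
	 * physically allocated.
	 */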
	holes = ~xfs_inobt_irec_to_allocmask(&irec);
	if ((holes & irec.ir_free) != holes ||
	    irec.ir_freecount > irec.ir_count)
		xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);

	for (i = 0; i < XFS_INOBT_HOLEMASK_BITS; i++) {
		if (holemask & 1)
			holecount += XFS_INODES_PER_HOLEMASK_BIT;
		else if (!xfs_scrub_iallocbt_chunk(bs, &irec, agino, len))
			break;
		holemask >>= 1;
		agino += XFS_INODES_PER_HOLEMASK_BIT;
	}

	if (holecount > XFS_INODES_PER_CHUNK ||
	    holecount + irec.ir_count != XFS_INODES_PER_CHUNK)
		xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);

check_freemask:
	error = xfs_scrub_iallocbt_check_freemask(bs, &irec);
	if (error)
		goto out;

out:
	return error;
}
/*
 * Make sure the inode btrees are as large as the rmap thinks they are.
 * Don't bother if we're missing btree cursors, as we're already corrupt.
 */
STATIC void
xfs_scrub_iallocbt_xref_rmap_btreeblks(
	struct xfs_scrub_context	*sc,
	int				which)
{
	struct xfs_owner_info		oinfo;
	xfs_filblks_t			blocks;
	xfs_extlen_t			inobt_blocks = 0;
	xfs_extlen_t			finobt_blocks = 0;
	int				error;

	if (!sc->sa.ino_cur || !sc->sa.rmap_cur ||
	    (xfs_sb_version_hasfinobt(&sc->mp->m_sb) && !sc->sa.fino_cur) ||
	    xfs_scrub_skip_xref(sc->sm))
		return;

	/* Check that we saw as many inobt blocks as the rmap says. */
	error = xfs_btree_count_blocks(sc->sa.ino_cur, &inobt_blocks);
	if (!xfs_scrub_process_error(sc, 0, 0, &error))
		return;

	if (sc->sa.fino_cur) {
		error = xfs_btree_count_blocks(sc->sa.fino_cur, &finobt_blocks);
		if (!xfs_scrub_process_error(sc, 0, 0, &error))
			return;
	}

	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INOBT);
	error = xfs_scrub_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, &oinfo,
			&blocks);
	if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
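	/* Blocks of both inode btrees are recorded under XFS_RMAP_OWN_INOBT. */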
	if (blocks != inobt_blocks + finobt_blocks)
		xfs_scrub_btree_set_corrupt(sc, sc->sa.ino_cur, 0);
}
/*
 * Make sure that the inobt records point to the same number of blocks as
 * the rmap says are owned by inodes.
 */
STATIC void
xfs_scrub_iallocbt_xref_rmap_inodes(
	struct xfs_scrub_context	*sc,
	int				which,
	xfs_filblks_t			inode_blocks)
{
	struct xfs_owner_info		oinfo;
	xfs_filblks_t			blocks;
	int				error;

	if (!sc->sa.rmap_cur || xfs_scrub_skip_xref(sc->sm))
		return;

	/* Check that we saw as many inode blocks as the rmap knows about. */
	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INODES);
	error = xfs_scrub_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, &oinfo,
			&blocks);
	if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	if (blocks != inode_blocks)
		xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
}
/* Scrub the inode btrees for some AG. */
STATIC int
xfs_scrub_iallocbt(
	struct xfs_scrub_context	*sc,
	xfs_btnum_t			which)
{
	struct xfs_btree_cur		*cur;
	struct xfs_owner_info		oinfo;
	xfs_filblks_t			inode_blocks = 0;
	int				error;

	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INOBT);
	cur = which == XFS_BTNUM_INO ? sc->sa.ino_cur : sc->sa.fino_cur;
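	/*
	 * The record scrubber tallies the blocks covered by every inode
	 * chunk record into inode_blocks (passed as its private pointer)
	 * so that we can cross-reference the total against the rmap below.
	 */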
	error = xfs_scrub_btree(sc, cur, xfs_scrub_iallocbt_rec, &oinfo,
			&inode_blocks);
	if (error)
		return error;

	xfs_scrub_iallocbt_xref_rmap_btreeblks(sc, which);

	/*
	 * If we're scrubbing the inode btree, inode_blocks is the number of
	 * blocks pointed to by all the inode chunk records.  Therefore, we
	 * should compare to the number of inode chunk blocks that the rmap
	 * knows about.  We can't do this for the finobt since it only points
	 * to inode chunks with free inodes.
	 */
	if (which == XFS_BTNUM_INO)
		xfs_scrub_iallocbt_xref_rmap_inodes(sc, which, inode_blocks);

	return error;
}

int
xfs_scrub_inobt(
	struct xfs_scrub_context	*sc)
{
	return xfs_scrub_iallocbt(sc, XFS_BTNUM_INO);
}

int
xfs_scrub_finobt(
	struct xfs_scrub_context	*sc)
{
	return xfs_scrub_iallocbt(sc, XFS_BTNUM_FINO);
}
/* See if an inode btree has (or doesn't have) an inode chunk record. */
static inline void
xfs_scrub_xref_inode_check(
	struct xfs_scrub_context	*sc,
	xfs_agblock_t			agbno,
	xfs_extlen_t			len,
	struct xfs_btree_cur		**icur,
	bool				should_have_inodes)
{
	bool				has_inodes;
	int				error;

	if (!(*icur) || xfs_scrub_skip_xref(sc->sm))
		return;

	error = xfs_ialloc_has_inodes_at_extent(*icur, agbno, len, &has_inodes);
	if (!xfs_scrub_should_check_xref(sc, &error, icur))
		return;
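	/*
	 * Complain if the btree's idea of whether inodes live in this
	 * extent disagrees with what the caller expects.
	 */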
	if (has_inodes != should_have_inodes)
		xfs_scrub_btree_xref_set_corrupt(sc, *icur, 0);
}
/* xref check that the extent is not covered by inodes */
void
xfs_scrub_xref_is_not_inode_chunk(
	struct xfs_scrub_context	*sc,
	xfs_agblock_t			agbno,
	xfs_extlen_t			len)
{
	xfs_scrub_xref_inode_check(sc, agbno, len, &sc->sa.ino_cur, false);
	xfs_scrub_xref_inode_check(sc, agbno, len, &sc->sa.fino_cur, false);
}
/* xref check that the extent is covered by inodes */
void
xfs_scrub_xref_is_inode_chunk(
	struct xfs_scrub_context	*sc,
	xfs_agblock_t			agbno,
	xfs_extlen_t			len)
{
	xfs_scrub_xref_inode_check(sc, agbno, len, &sc->sa.ino_cur, true);
}