/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_btree.h"
#include "xfs_error.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_fsops.h"
#include "xfs_itable.h"
#include "xfs_trans_space.h"
#include "xfs_rtalloc.h"
#include "xfs_filestream.h"
#include "xfs_trace.h"
/*
 * File system operations
 */
int
xfs_fs_geometry(
	xfs_mount_t		*mp,
	xfs_fsop_geom_t		*geo,
	int			new_version)
{

	memset(geo, 0, sizeof(*geo));

	geo->blocksize = mp->m_sb.sb_blocksize;
	geo->rtextsize = mp->m_sb.sb_rextsize;
	geo->agblocks = mp->m_sb.sb_agblocks;
	geo->agcount = mp->m_sb.sb_agcount;
	geo->logblocks = mp->m_sb.sb_logblocks;
	geo->sectsize = mp->m_sb.sb_sectsize;
	geo->inodesize = mp->m_sb.sb_inodesize;
	geo->imaxpct = mp->m_sb.sb_imax_pct;
	geo->datablocks = mp->m_sb.sb_dblocks;
	geo->rtblocks = mp->m_sb.sb_rblocks;
	geo->rtextents = mp->m_sb.sb_rextents;
	geo->logstart = mp->m_sb.sb_logstart;
	ASSERT(sizeof(geo->uuid) == sizeof(mp->m_sb.sb_uuid));
	memcpy(geo->uuid, &mp->m_sb.sb_uuid, sizeof(mp->m_sb.sb_uuid));
	if (new_version >= 2) {
		geo->sunit = mp->m_sb.sb_unit;
		geo->swidth = mp->m_sb.sb_width;
	}
	if (new_version >= 3) {
		geo->version = XFS_FSOP_GEOM_VERSION;
		geo->flags =
			(xfs_sb_version_hasattr(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_ATTR : 0) |
			(xfs_sb_version_hasnlink(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_NLINK : 0) |
			(xfs_sb_version_hasquota(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_QUOTA : 0) |
			(xfs_sb_version_hasalign(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_IALIGN : 0) |
			(xfs_sb_version_hasdalign(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_DALIGN : 0) |
			(xfs_sb_version_hasshared(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_SHARED : 0) |
			(xfs_sb_version_hasextflgbit(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_EXTFLG : 0) |
			(xfs_sb_version_hasdirv2(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_DIRV2 : 0) |
			(xfs_sb_version_hassector(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_SECTOR : 0) |
			(xfs_sb_version_hasasciici(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_DIRV2CI : 0) |
			(xfs_sb_version_haslazysbcount(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_LAZYSB : 0) |
			(xfs_sb_version_hasattr2(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_ATTR2 : 0) |
			(xfs_sb_version_hasprojid32bit(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_PROJID32 : 0) |
			(xfs_sb_version_hascrc(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_V5SB : 0);
		geo->logsectsize = xfs_sb_version_hassector(&mp->m_sb) ?
				mp->m_sb.sb_logsectsize : BBSIZE;
		geo->rtsectsize = mp->m_sb.sb_blocksize;
		geo->dirblocksize = mp->m_dirblksize;
	}
	if (new_version >= 4) {
		geo->flags |=
			(xfs_sb_version_haslogv2(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_LOGV2 : 0);
		geo->logsunit = mp->m_sb.sb_logsunit;
	}
	return 0;
}
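
/*
 * Illustrative userspace sketch (not part of the original source): the
 * geometry filled in above reaches userspace through the XFS_IOC_FSGEOMETRY
 * ioctl, using the xfsprogs headers. Assuming "fd" is an open descriptor on
 * any file in the filesystem:
 *
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <xfs/xfs.h>
 *
 *	static int print_geometry(int fd)
 *	{
 *		struct xfs_fsop_geom geo;
 *
 *		if (ioctl(fd, XFS_IOC_FSGEOMETRY, &geo) < 0)
 *			return -1;
 *		printf("bsize %u agcount %u agblocks %u dblocks %llu\n",
 *			geo.blocksize, geo.agcount, geo.agblocks,
 *			(unsigned long long)geo.datablocks);
 *		return 0;
 *	}
 */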
static struct xfs_buf *
xfs_growfs_get_hdr_buf(
	struct xfs_mount	*mp,
	xfs_daddr_t		blkno,
	size_t			numblks,
	int			flags,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;

	bp = xfs_buf_get_uncached(mp->m_ddev_targp, numblks, flags);
	if (!bp)
		return NULL;

	xfs_buf_zero(bp, 0, BBTOB(bp->b_length));
	bp->b_bn = blkno;
	bp->b_maps[0].bm_bn = blkno;
	bp->b_ops = ops;

	return bp;
}
static int
xfs_growfs_data_private(
	xfs_mount_t		*mp,		/* mount point for filesystem */
	xfs_growfs_data_t	*in)		/* growfs data input struct */
{
	xfs_agf_t		*agf;
	struct xfs_agfl		*agfl;
	xfs_agi_t		*agi;
	xfs_agnumber_t		agno;
	xfs_extlen_t		agsize;
	xfs_extlen_t		tmpsize;
	xfs_alloc_rec_t		*arec;
	xfs_buf_t		*bp;
	int			bucket;
	int			dpct;
	int			error;
	xfs_agnumber_t		nagcount;
	xfs_agnumber_t		nagimax = 0;
	xfs_rfsblock_t		nb, nb_mod;
	xfs_rfsblock_t		new;
	xfs_rfsblock_t		nfree;
	xfs_agnumber_t		oagcount;
	int			pct;
	xfs_trans_t		*tp;

	nb = in->newblocks;
	pct = in->imaxpct;
	if (nb < mp->m_sb.sb_dblocks || pct < 0 || pct > 100)
		return XFS_ERROR(EINVAL);
	if ((error = xfs_sb_validate_fsb_count(&mp->m_sb, nb)))
		return error;
	dpct = pct - mp->m_sb.sb_imax_pct;
	bp = xfs_buf_read_uncached(mp->m_ddev_targp,
				XFS_FSB_TO_BB(mp, nb) - XFS_FSS_TO_BB(mp, 1),
				XFS_FSS_TO_BB(mp, 1), 0, NULL);
	if (!bp)
		return EIO;
	if (bp->b_error) {
		int	error = bp->b_error;

		xfs_buf_relse(bp);
		return error;
	}
	xfs_buf_relse(bp);
	new = nb;	/* use new as a temporary here */
	nb_mod = do_div(new, mp->m_sb.sb_agblocks);
	nagcount = new + (nb_mod != 0);
	if (nb_mod && nb_mod < XFS_MIN_AG_BLOCKS) {
		nagcount--;
		nb = (xfs_rfsblock_t)nagcount * mp->m_sb.sb_agblocks;
		if (nb < mp->m_sb.sb_dblocks)
			return XFS_ERROR(EINVAL);
	}
	new = nb - mp->m_sb.sb_dblocks;
	oagcount = mp->m_sb.sb_agcount;

	/* allocate the new per-ag structures */
	if (nagcount > oagcount) {
		error = xfs_initialize_perag(mp, nagcount, &nagimax);
		if (error)
			return error;
	}
	tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFS);
	tp->t_flags |= XFS_TRANS_RESERVE;
	if ((error = xfs_trans_reserve(tp, XFS_GROWFS_SPACE_RES(mp),
			XFS_GROWDATA_LOG_RES(mp), 0, 0, 0))) {
		xfs_trans_cancel(tp, 0);
		return error;
	}
	/*
	 * Write new AG headers to disk. Non-transactional, but written
	 * synchronously so they are completed prior to the growfs transaction
	 * being logged.
	 */
	nfree = 0;
	for (agno = nagcount - 1; agno >= oagcount; agno--, new -= agsize) {
		/*
		 * AG freespace header block
		 */
		bp = xfs_growfs_get_hdr_buf(mp,
				XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
				XFS_FSS_TO_BB(mp, 1), 0,
				&xfs_agf_buf_ops);
		if (!bp) {
			error = ENOMEM;
			goto error0;
		}

		agf = XFS_BUF_TO_AGF(bp);
		agf->agf_magicnum = cpu_to_be32(XFS_AGF_MAGIC);
		agf->agf_versionnum = cpu_to_be32(XFS_AGF_VERSION);
		agf->agf_seqno = cpu_to_be32(agno);
		if (agno == nagcount - 1)
			agsize = nb -
				(agno * (xfs_rfsblock_t)mp->m_sb.sb_agblocks);
		else
			agsize = mp->m_sb.sb_agblocks;
		agf->agf_length = cpu_to_be32(agsize);
		agf->agf_roots[XFS_BTNUM_BNOi] = cpu_to_be32(XFS_BNO_BLOCK(mp));
		agf->agf_roots[XFS_BTNUM_CNTi] = cpu_to_be32(XFS_CNT_BLOCK(mp));
		agf->agf_levels[XFS_BTNUM_BNOi] = cpu_to_be32(1);
		agf->agf_levels[XFS_BTNUM_CNTi] = cpu_to_be32(1);
		agf->agf_flfirst = 0;
		agf->agf_fllast = cpu_to_be32(XFS_AGFL_SIZE(mp) - 1);
		agf->agf_flcount = 0;
		tmpsize = agsize - XFS_PREALLOC_BLOCKS(mp);
		agf->agf_freeblks = cpu_to_be32(tmpsize);
		agf->agf_longest = cpu_to_be32(tmpsize);
		if (xfs_sb_version_hascrc(&mp->m_sb))
			uuid_copy(&agf->agf_uuid, &mp->m_sb.sb_uuid);

		error = xfs_bwrite(bp);
		xfs_buf_relse(bp);
		if (error)
			goto error0;
		/*
		 * AG freelist header block
		 */
		bp = xfs_growfs_get_hdr_buf(mp,
				XFS_AG_DADDR(mp, agno, XFS_AGFL_DADDR(mp)),
				XFS_FSS_TO_BB(mp, 1), 0,
				&xfs_agfl_buf_ops);
		if (!bp) {
			error = ENOMEM;
			goto error0;
		}

		agfl = XFS_BUF_TO_AGFL(bp);
		if (xfs_sb_version_hascrc(&mp->m_sb)) {
			agfl->agfl_magicnum = cpu_to_be32(XFS_AGFL_MAGIC);
			agfl->agfl_seqno = cpu_to_be32(agno);
			uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_uuid);
		}
		for (bucket = 0; bucket < XFS_AGFL_SIZE(mp); bucket++)
			agfl->agfl_bno[bucket] = cpu_to_be32(NULLAGBLOCK);

		error = xfs_bwrite(bp);
		xfs_buf_relse(bp);
		if (error)
			goto error0;
		/*
		 * AG inode header block
		 */
		bp = xfs_growfs_get_hdr_buf(mp,
				XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)),
				XFS_FSS_TO_BB(mp, 1), 0,
				&xfs_agi_buf_ops);
		if (!bp) {
			error = ENOMEM;
			goto error0;
		}

		agi = XFS_BUF_TO_AGI(bp);
		agi->agi_magicnum = cpu_to_be32(XFS_AGI_MAGIC);
		agi->agi_versionnum = cpu_to_be32(XFS_AGI_VERSION);
		agi->agi_seqno = cpu_to_be32(agno);
		agi->agi_length = cpu_to_be32(agsize);
		agi->agi_count = 0;
		agi->agi_root = cpu_to_be32(XFS_IBT_BLOCK(mp));
		agi->agi_level = cpu_to_be32(1);
		agi->agi_freecount = 0;
		agi->agi_newino = cpu_to_be32(NULLAGINO);
		agi->agi_dirino = cpu_to_be32(NULLAGINO);
		if (xfs_sb_version_hascrc(&mp->m_sb))
			uuid_copy(&agi->agi_uuid, &mp->m_sb.sb_uuid);
		for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++)
			agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);

		error = xfs_bwrite(bp);
		xfs_buf_relse(bp);
		if (error)
			goto error0;
		/*
		 * BNO btree root block
		 */
		bp = xfs_growfs_get_hdr_buf(mp,
				XFS_AGB_TO_DADDR(mp, agno, XFS_BNO_BLOCK(mp)),
				BTOBB(mp->m_sb.sb_blocksize), 0,
				&xfs_allocbt_buf_ops);
		if (!bp) {
			error = ENOMEM;
			goto error0;
		}

		if (xfs_sb_version_hascrc(&mp->m_sb))
			xfs_btree_init_block(mp, bp, XFS_ABTB_CRC_MAGIC, 0, 1,
						agno, XFS_BTREE_CRC_BLOCKS);
		else
			xfs_btree_init_block(mp, bp, XFS_ABTB_MAGIC, 0, 1,
						agno, 0);

		arec = XFS_ALLOC_REC_ADDR(mp, XFS_BUF_TO_BLOCK(bp), 1);
		arec->ar_startblock = cpu_to_be32(XFS_PREALLOC_BLOCKS(mp));
		arec->ar_blockcount = cpu_to_be32(
			agsize - be32_to_cpu(arec->ar_startblock));

		error = xfs_bwrite(bp);
		xfs_buf_relse(bp);
		if (error)
			goto error0;
		/*
		 * CNT btree root block
		 */
		bp = xfs_growfs_get_hdr_buf(mp,
				XFS_AGB_TO_DADDR(mp, agno, XFS_CNT_BLOCK(mp)),
				BTOBB(mp->m_sb.sb_blocksize), 0,
				&xfs_allocbt_buf_ops);
		if (!bp) {
			error = ENOMEM;
			goto error0;
		}

		if (xfs_sb_version_hascrc(&mp->m_sb))
			xfs_btree_init_block(mp, bp, XFS_ABTC_CRC_MAGIC, 0, 1,
						agno, XFS_BTREE_CRC_BLOCKS);
		else
			xfs_btree_init_block(mp, bp, XFS_ABTC_MAGIC, 0, 1,
						agno, 0);

		arec = XFS_ALLOC_REC_ADDR(mp, XFS_BUF_TO_BLOCK(bp), 1);
		arec->ar_startblock = cpu_to_be32(XFS_PREALLOC_BLOCKS(mp));
		arec->ar_blockcount = cpu_to_be32(
			agsize - be32_to_cpu(arec->ar_startblock));
		nfree += be32_to_cpu(arec->ar_blockcount);

		error = xfs_bwrite(bp);
		xfs_buf_relse(bp);
		if (error)
			goto error0;
		/*
		 * INO btree root block
		 */
		bp = xfs_growfs_get_hdr_buf(mp,
				XFS_AGB_TO_DADDR(mp, agno, XFS_IBT_BLOCK(mp)),
				BTOBB(mp->m_sb.sb_blocksize), 0,
				&xfs_inobt_buf_ops);
		if (!bp) {
			error = ENOMEM;
			goto error0;
		}

		if (xfs_sb_version_hascrc(&mp->m_sb))
			xfs_btree_init_block(mp, bp, XFS_IBT_CRC_MAGIC, 0, 0,
						agno, XFS_BTREE_CRC_BLOCKS);
		else
			xfs_btree_init_block(mp, bp, XFS_IBT_MAGIC, 0, 0,
						agno, 0);

		error = xfs_bwrite(bp);
		xfs_buf_relse(bp);
		if (error)
			goto error0;
	}
	xfs_trans_agblocks_delta(tp, nfree);
	/*
	 * There are new blocks in the old last a.g.
	 */
	if (new) {
		/*
		 * Change the agi length.
		 */
		error = xfs_ialloc_read_agi(mp, tp, agno, &bp);
		if (error)
			goto error0;
		ASSERT(bp);
		agi = XFS_BUF_TO_AGI(bp);
		be32_add_cpu(&agi->agi_length, new);
		ASSERT(nagcount == oagcount ||
		       be32_to_cpu(agi->agi_length) == mp->m_sb.sb_agblocks);
		xfs_ialloc_log_agi(tp, bp, XFS_AGI_LENGTH);
		/*
		 * Change agf length.
		 */
		error = xfs_alloc_read_agf(mp, tp, agno, 0, &bp);
		if (error)
			goto error0;
		ASSERT(bp);

		agf = XFS_BUF_TO_AGF(bp);
		be32_add_cpu(&agf->agf_length, new);
		ASSERT(be32_to_cpu(agf->agf_length) ==
		       be32_to_cpu(agi->agi_length));

		xfs_alloc_log_agf(tp, bp, XFS_AGF_LENGTH);
		/*
		 * Free the new space.
		 */
		error = xfs_free_extent(tp, XFS_AGB_TO_FSB(mp, agno,
			be32_to_cpu(agf->agf_length) - new), new);
		if (error)
			goto error0;
	}
	/*
	 * Update changed superblock fields transactionally. These are not
	 * seen by the rest of the world until the transaction commit applies
	 * them atomically to the superblock.
	 */
	if (nagcount > oagcount)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_AGCOUNT, nagcount - oagcount);
	if (nb > mp->m_sb.sb_dblocks)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_DBLOCKS,
				 nb - mp->m_sb.sb_dblocks);
	if (nfree)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, nfree);
	if (dpct)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_IMAXPCT, dpct);
	error = xfs_trans_commit(tp, 0);
	if (error)
		return error;
	/* New allocation groups fully initialized, so update mount struct */
	if (nagimax)
		mp->m_maxagi = nagimax;
	if (mp->m_sb.sb_imax_pct) {
		__uint64_t icount = mp->m_sb.sb_dblocks * mp->m_sb.sb_imax_pct;
		do_div(icount, 100);
		mp->m_maxicount = icount << mp->m_sb.sb_inopblog;
	} else
		mp->m_maxicount = 0;
	xfs_set_low_space_thresholds(mp);
	/* update secondary superblocks. */
	for (agno = 1; agno < nagcount; agno++) {
		error = 0;
		/*
		 * new secondary superblocks need to be zeroed, not read from
		 * disk as the contents of the new area we are growing into is
		 * completely unknown.
		 */
		if (agno < oagcount) {
			error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
				  XFS_AGB_TO_DADDR(mp, agno, XFS_SB_BLOCK(mp)),
				  XFS_FSS_TO_BB(mp, 1), 0, &bp,
				  &xfs_sb_buf_ops);
		} else {
			bp = xfs_trans_get_buf(NULL, mp->m_ddev_targp,
				  XFS_AGB_TO_DADDR(mp, agno, XFS_SB_BLOCK(mp)),
				  XFS_FSS_TO_BB(mp, 1), 0);
			if (bp) {
				bp->b_ops = &xfs_sb_buf_ops;
				xfs_buf_zero(bp, 0, BBTOB(bp->b_length));
			} else
				error = ENOMEM;
		}
500 "error %d reading secondary superblock for ag %d",
504 xfs_sb_to_disk(XFS_BUF_TO_SBP(bp
), &mp
->m_sb
, XFS_SB_ALL_BITS
);
		/*
		 * If we get an error writing out the alternate superblocks,
		 * just issue a warning and continue.  The real work is
		 * already done and committed.
		 */
		error = xfs_bwrite(bp);
		xfs_buf_relse(bp);
		if (error) {
			xfs_warn(mp,
		"write error %d updating secondary superblock for ag %d",
				error, agno);
			break; /* no point in continuing */
		}
	}
	return error;
 error0:
	xfs_trans_cancel(tp, XFS_TRANS_ABORT);
	return error;
}
static int
xfs_growfs_log_private(
	xfs_mount_t		*mp,	/* mount point for filesystem */
	xfs_growfs_log_t	*in)	/* growfs log input struct */
{
	xfs_extlen_t		nb;

	nb = in->newblocks;
	if (nb < XFS_MIN_LOG_BLOCKS || nb < XFS_B_TO_FSB(mp, XFS_MIN_LOG_BYTES))
		return XFS_ERROR(EINVAL);
	if (nb == mp->m_sb.sb_logblocks &&
	    in->isint == (mp->m_sb.sb_logstart != 0))
		return XFS_ERROR(EINVAL);
	/*
	 * Moving the log is hard, need new interfaces to sync
	 * the log first, hold off all activity while moving it.
	 * Can have shorter or longer log in the same space,
	 * or transform internal to external log or vice versa.
	 */
	return XFS_ERROR(ENOSYS);
}
/*
 * protected versions of growfs function acquire and release locks on the mount
 * point - exported through ioctls: XFS_IOC_FSGROWFSDATA, XFS_IOC_FSGROWFSLOG,
 * XFS_IOC_FSGROWFSRT
 */
int
xfs_growfs_data(
	xfs_mount_t		*mp,
	xfs_growfs_data_t	*in)
{
	int error;

	if (!capable(CAP_SYS_ADMIN))
		return XFS_ERROR(EPERM);
	if (!mutex_trylock(&mp->m_growlock))
		return XFS_ERROR(EWOULDBLOCK);
	error = xfs_growfs_data_private(mp, in);
	mutex_unlock(&mp->m_growlock);
	return error;
}
int
xfs_growfs_log(
	xfs_mount_t		*mp,
	xfs_growfs_log_t	*in)
{
	int error;

	if (!capable(CAP_SYS_ADMIN))
		return XFS_ERROR(EPERM);
	if (!mutex_trylock(&mp->m_growlock))
		return XFS_ERROR(EWOULDBLOCK);
	error = xfs_growfs_log_private(mp, in);
	mutex_unlock(&mp->m_growlock);
	return error;
}
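
/*
 * Illustrative userspace sketch (not part of the original source): these
 * wrappers are driven through the XFS_IOC_FSGROWFSDATA ioctl, which is what
 * xfs_growfs(8) ultimately issues. Assuming "fd" is an open descriptor on
 * the root of the mounted filesystem and "newblocks" the desired data
 * device size in filesystem blocks:
 *
 *	#include <sys/ioctl.h>
 *	#include <xfs/xfs.h>
 *
 *	static int grow_data(int fd, __u64 newblocks, __u32 imaxpct)
 *	{
 *		struct xfs_growfs_data in = {
 *			.newblocks = newblocks,
 *			.imaxpct = imaxpct,
 *		};
 *
 *		return ioctl(fd, XFS_IOC_FSGROWFSDATA, &in);
 *	}
 *
 * The call needs CAP_SYS_ADMIN and fails with EWOULDBLOCK if another grow
 * already holds m_growlock, matching the checks above.
 */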
/*
 * exported through ioctl XFS_IOC_FSCOUNTS
 */
int
xfs_fs_counts(
	xfs_mount_t		*mp,
	xfs_fsop_counts_t	*cnt)
{
	xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT);
	spin_lock(&mp->m_sb_lock);
	cnt->freedata = mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
	cnt->freertx = mp->m_sb.sb_frextents;
	cnt->freeino = mp->m_sb.sb_ifree;
	cnt->allocino = mp->m_sb.sb_icount;
	spin_unlock(&mp->m_sb_lock);
	return 0;
}
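
/*
 * Illustrative userspace sketch (not part of the original source): the
 * counters published above arrive in userspace via the XFS_IOC_FSCOUNTS
 * ioctl:
 *
 *	struct xfs_fsop_counts cnt;
 *
 *	if (ioctl(fd, XFS_IOC_FSCOUNTS, &cnt) == 0)
 *		printf("free blocks %llu free inodes %llu\n",
 *			(unsigned long long)cnt.freedata,
 *			(unsigned long long)cnt.freeino);
 */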
/*
 * exported through ioctl XFS_IOC_SET_RESBLKS & XFS_IOC_GET_RESBLKS
 *
 * xfs_reserve_blocks is called to set m_resblks
 * in the in-core mount table. The number of unused reserved blocks
 * is kept in m_resblks_avail.
 *
 * Reserve the requested number of blocks if available. Otherwise return
 * as many as possible to satisfy the request. The actual number
 * reserved is returned in outval.
 *
 * A null inval pointer indicates that only the current reserved blocks
 * available should be returned; no settings are changed.
 */
int
xfs_reserve_blocks(
	xfs_mount_t		*mp,
	__uint64_t		*inval,
	xfs_fsop_resblks_t	*outval)
{
	__int64_t		lcounter, delta, fdblks_delta;
	__uint64_t		request;
	/* If inval is null, report current values and return */
	if (inval == (__uint64_t *)NULL) {
		if (!outval)
			return EINVAL;
		outval->resblks = mp->m_resblks;
		outval->resblks_avail = mp->m_resblks_avail;
		return 0;
	}

	request = *inval;
	/*
	 * With per-cpu counters, this becomes an interesting
	 * problem. We need to work out if we are freeing or allocating
	 * blocks first, then we can do the modification as necessary.
	 *
	 * We do this under the m_sb_lock so that if we are near
	 * ENOSPC, we will hold out any changes while we work out
	 * what to do. This means that the amount of free space can
	 * change while we do this, so we need to retry if we end up
	 * trying to reserve more space than is available.
	 *
	 * We also use the xfs_mod_incore_sb() interface so that we
	 * don't have to care about whether per-cpu counters are
	 * enabled, disabled or even compiled in....
	 */
retry:
	spin_lock(&mp->m_sb_lock);
	xfs_icsb_sync_counters_locked(mp, 0);
	/*
	 * If our previous reservation was larger than the current value,
	 * then move any unused blocks back to the free pool.
	 */
	fdblks_delta = 0;
	if (mp->m_resblks > request) {
		lcounter = mp->m_resblks_avail - request;
		if (lcounter > 0) {		/* release unused blocks */
			fdblks_delta = lcounter;
			mp->m_resblks_avail -= lcounter;
		}
		mp->m_resblks = request;
	} else {
		__int64_t	free;

		free = mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
		if (!free)
			goto out; /* ENOSPC and fdblks_delta = 0 */

		delta = request - mp->m_resblks;
		lcounter = free - delta;
		if (lcounter < 0) {
			/* We can't satisfy the request, just get what we can */
			mp->m_resblks += free;
			mp->m_resblks_avail += free;
			fdblks_delta = -free;
		} else {
			fdblks_delta = -delta;
			mp->m_resblks = request;
			mp->m_resblks_avail += delta;
		}
	}
out:
	if (outval) {
		outval->resblks = mp->m_resblks;
		outval->resblks_avail = mp->m_resblks_avail;
	}
	spin_unlock(&mp->m_sb_lock);
	/*
	 * If we are putting blocks back here, m_resblks_avail is
	 * already at its max so this will put it in the free pool.
	 *
	 * If we need space, we'll either succeed in getting it
	 * from the free block count or we'll get an ENOSPC. If
	 * we get an ENOSPC, it means things changed while we were
	 * calculating fdblks_delta and so we should try again to
	 * see if there is anything left to reserve.
	 *
	 * Don't set the reserved flag here - we don't want to reserve
	 * the extra reserve blocks from the reserve.....
	 */
	if (fdblks_delta) {
		int error;

		error = xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
						 fdblks_delta, 0);
		if (error == ENOSPC)
			goto retry;
	}
	return 0;
}
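
/*
 * Illustrative userspace sketch (not part of the original source): the
 * reservation is queried and resized through XFS_IOC_GET_RESBLKS and
 * XFS_IOC_SET_RESBLKS. The set path passes the request in and gets the
 * granted values back in the same structure:
 *
 *	struct xfs_fsop_resblks res;
 *
 *	res.resblks = 8192;	(request 8192 reserved blocks)
 *	if (ioctl(fd, XFS_IOC_SET_RESBLKS, &res) == 0)
 *		printf("reserved %llu available %llu\n",
 *			(unsigned long long)res.resblks,
 *			(unsigned long long)res.resblks_avail);
 */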
/*
 * Dump a transaction into the log that contains no real change. This is needed
 * to be able to make the log dirty or stamp the current tail LSN into the log
 * during the covering operation.
 *
 * We cannot use an inode here for this - that will push dirty state back up
 * into the VFS and then periodic inode flushing will prevent log covering from
 * making progress. Hence we log a field in the superblock instead and use a
 * synchronous transaction to ensure the superblock is immediately unpinned
 * and can be written back.
 */
int
xfs_fs_log_dummy(
	xfs_mount_t	*mp)
{
	xfs_trans_t	*tp;
	int		error;

	tp = _xfs_trans_alloc(mp, XFS_TRANS_DUMMY1, KM_SLEEP);
	error = xfs_trans_reserve(tp, 0, XFS_SB_LOG_RES(mp), 0, 0,
				  XFS_DEFAULT_LOG_COUNT);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	/* log the UUID because it is an unchanging field */
	xfs_mod_sb(tp, XFS_SB_UUID);
	xfs_trans_set_sync(tp);
	return xfs_trans_commit(tp, 0);
}
int
xfs_fs_goingdown(
	xfs_mount_t	*mp,
	__uint32_t	inflags)
{
	switch (inflags) {
	case XFS_FSOP_GOING_FLAGS_DEFAULT: {
		struct super_block *sb = freeze_bdev(mp->m_super->s_bdev);

		if (sb && !IS_ERR(sb)) {
			xfs_force_shutdown(mp, SHUTDOWN_FORCE_UMOUNT);
			thaw_bdev(sb->s_bdev, sb);
		}

		break;
	}
	case XFS_FSOP_GOING_FLAGS_LOGFLUSH:
		xfs_force_shutdown(mp, SHUTDOWN_FORCE_UMOUNT);
		break;
	case XFS_FSOP_GOING_FLAGS_NOLOGFLUSH:
		xfs_force_shutdown(mp,
				SHUTDOWN_FORCE_UMOUNT | SHUTDOWN_LOG_IO_ERROR);
		break;
	default:
		return XFS_ERROR(EINVAL);
	}

	return 0;
}
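
/*
 * Illustrative userspace sketch (not part of the original source): this path
 * is reached through the XFS_IOC_GOINGDOWN ioctl (xfs_io's "shutdown"
 * command). For instance, to shut down without flushing the log, as in the
 * NOLOGFLUSH case above:
 *
 *	__u32 flags = XFS_FSOP_GOING_FLAGS_NOLOGFLUSH;
 *
 *	if (ioctl(fd, XFS_IOC_GOINGDOWN, &flags) < 0)
 *		perror("XFS_IOC_GOINGDOWN");
 */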
/*
 * Force a shutdown of the filesystem instantly while keeping the filesystem
 * consistent. We don't do an unmount here; just shutdown the shop, make sure
 * that absolutely nothing persistent happens to this filesystem after this
 * point.
 */
void
xfs_do_force_shutdown(
	xfs_mount_t	*mp,
	int		flags,
	char		*fname,
	int		lnnum)
{
	int		logerror;

	logerror = flags & SHUTDOWN_LOG_IO_ERROR;

	if (!(flags & SHUTDOWN_FORCE_UMOUNT)) {
		xfs_notice(mp,
	"%s(0x%x) called from line %d of file %s.  Return address = 0x%p",
			__func__, flags, lnnum, fname, __return_address);
	}
	/*
	 * No need to duplicate efforts.
	 */
	if (XFS_FORCED_SHUTDOWN(mp) && !logerror)
		return;
	/*
	 * This flags XFS_MOUNT_FS_SHUTDOWN, makes sure that we don't
	 * queue up anybody new on the log reservations, and wakes up
	 * everybody who's sleeping on log reservations to tell them
	 * the bad news.
	 */
	if (xfs_log_force_umount(mp, logerror))
		return;
822 if (flags
& SHUTDOWN_CORRUPT_INCORE
) {
823 xfs_alert_tag(mp
, XFS_PTAG_SHUTDOWN_CORRUPT
,
824 "Corruption of in-memory data detected. Shutting down filesystem");
825 if (XFS_ERRLEVEL_HIGH
<= xfs_error_level
)
827 } else if (!(flags
& SHUTDOWN_FORCE_UMOUNT
)) {
829 xfs_alert_tag(mp
, XFS_PTAG_SHUTDOWN_LOGERROR
,
830 "Log I/O Error Detected. Shutting down filesystem");
831 } else if (flags
& SHUTDOWN_DEVICE_REQ
) {
832 xfs_alert_tag(mp
, XFS_PTAG_SHUTDOWN_IOERROR
,
833 "All device paths lost. Shutting down filesystem");
834 } else if (!(flags
& SHUTDOWN_REMOTE_REQ
)) {
835 xfs_alert_tag(mp
, XFS_PTAG_SHUTDOWN_IOERROR
,
836 "I/O Error Detected. Shutting down filesystem");
839 if (!(flags
& SHUTDOWN_FORCE_UMOUNT
)) {
841 "Please umount the filesystem and rectify the problem(s)");