fs/xfs/xfs_fsops.c
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_inum.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_btree.h"
#include "xfs_error.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_fsops.h"
#include "xfs_itable.h"
#include "xfs_trans_space.h"
#include "xfs_rtalloc.h"
#include "xfs_rw.h"
#include "xfs_filestream.h"
#include "xfs_trace.h"
/*
 * File system operations
 */
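/*
 * Fill in the on-disk geometry information for the XFS_IOC_FSGEOMETRY
 * family of ioctls; new_version selects how much of the structure the
 * caller can accept.
 */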
int
xfs_fs_geometry(
	xfs_mount_t		*mp,
	xfs_fsop_geom_t		*geo,
	int			new_version)
{
	geo->blocksize = mp->m_sb.sb_blocksize;
	geo->rtextsize = mp->m_sb.sb_rextsize;
	geo->agblocks = mp->m_sb.sb_agblocks;
	geo->agcount = mp->m_sb.sb_agcount;
	geo->logblocks = mp->m_sb.sb_logblocks;
	geo->sectsize = mp->m_sb.sb_sectsize;
	geo->inodesize = mp->m_sb.sb_inodesize;
	geo->imaxpct = mp->m_sb.sb_imax_pct;
	geo->datablocks = mp->m_sb.sb_dblocks;
	geo->rtblocks = mp->m_sb.sb_rblocks;
	geo->rtextents = mp->m_sb.sb_rextents;
	geo->logstart = mp->m_sb.sb_logstart;
	ASSERT(sizeof(geo->uuid) == sizeof(mp->m_sb.sb_uuid));
	memcpy(geo->uuid, &mp->m_sb.sb_uuid, sizeof(mp->m_sb.sb_uuid));
	if (new_version >= 2) {
		geo->sunit = mp->m_sb.sb_unit;
		geo->swidth = mp->m_sb.sb_width;
	}
	if (new_version >= 3) {
		geo->version = XFS_FSOP_GEOM_VERSION;
		geo->flags =
			(xfs_sb_version_hasattr(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_ATTR : 0) |
			(xfs_sb_version_hasnlink(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_NLINK : 0) |
			(xfs_sb_version_hasquota(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_QUOTA : 0) |
			(xfs_sb_version_hasalign(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_IALIGN : 0) |
			(xfs_sb_version_hasdalign(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_DALIGN : 0) |
			(xfs_sb_version_hasshared(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_SHARED : 0) |
			(xfs_sb_version_hasextflgbit(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_EXTFLG : 0) |
			(xfs_sb_version_hasdirv2(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_DIRV2 : 0) |
			(xfs_sb_version_hassector(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_SECTOR : 0) |
			(xfs_sb_version_hasasciici(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_DIRV2CI : 0) |
			(xfs_sb_version_haslazysbcount(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_LAZYSB : 0) |
			(xfs_sb_version_hasattr2(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_ATTR2 : 0);
		geo->logsectsize = xfs_sb_version_hassector(&mp->m_sb) ?
				mp->m_sb.sb_logsectsize : BBSIZE;
		geo->rtsectsize = mp->m_sb.sb_blocksize;
		geo->dirblocksize = mp->m_dirblksize;
	}
	if (new_version >= 4) {
		geo->flags |=
			(xfs_sb_version_haslogv2(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_LOGV2 : 0);
		geo->logsunit = mp->m_sb.sb_logsunit;
	}
	return 0;
}
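/*
 * Grow the data section of the filesystem: validate the new size, write
 * headers and btree root blocks for any new allocation groups, extend the
 * old last AG if it gained blocks, and finally update the superblock and
 * its secondary copies.
 */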
static int
xfs_growfs_data_private(
	xfs_mount_t		*mp,		/* mount point for filesystem */
	xfs_growfs_data_t	*in)		/* growfs data input struct */
{
	xfs_agf_t		*agf;
	xfs_agi_t		*agi;
	xfs_agnumber_t		agno;
	xfs_extlen_t		agsize;
	xfs_extlen_t		tmpsize;
	xfs_alloc_rec_t		*arec;
	struct xfs_btree_block	*block;
	xfs_buf_t		*bp;
	int			bucket;
	int			dpct;
	int			error;
	xfs_agnumber_t		nagcount;
	xfs_agnumber_t		nagimax = 0;
	xfs_rfsblock_t		nb, nb_mod;
	xfs_rfsblock_t		new;
	xfs_rfsblock_t		nfree;
	xfs_agnumber_t		oagcount;
	int			pct;
	xfs_trans_t		*tp;

	nb = in->newblocks;
	pct = in->imaxpct;
	if (nb < mp->m_sb.sb_dblocks || pct < 0 || pct > 100)
		return XFS_ERROR(EINVAL);
	if ((error = xfs_sb_validate_fsb_count(&mp->m_sb, nb)))
		return error;
	dpct = pct - mp->m_sb.sb_imax_pct;
	error = xfs_read_buf(mp, mp->m_ddev_targp,
			XFS_FSB_TO_BB(mp, nb) - XFS_FSS_TO_BB(mp, 1),
			XFS_FSS_TO_BB(mp, 1), 0, &bp);
	if (error)
		return error;
	ASSERT(bp);
	xfs_buf_relse(bp);
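	/*
	 * Work out how many allocation groups the new size requires.  If
	 * the final AG would be smaller than XFS_MIN_AG_BLOCKS, round the
	 * new size down so the last AG stays at least that large.
	 */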
	new = nb;	/* use new as a temporary here */
	nb_mod = do_div(new, mp->m_sb.sb_agblocks);
	nagcount = new + (nb_mod != 0);
	if (nb_mod && nb_mod < XFS_MIN_AG_BLOCKS) {
		nagcount--;
		nb = (xfs_rfsblock_t)nagcount * mp->m_sb.sb_agblocks;
		if (nb < mp->m_sb.sb_dblocks)
			return XFS_ERROR(EINVAL);
	}
	new = nb - mp->m_sb.sb_dblocks;
	oagcount = mp->m_sb.sb_agcount;

	/* allocate the new per-ag structures */
	if (nagcount > oagcount) {
		error = xfs_initialize_perag(mp, nagcount, &nagimax);
		if (error)
			return error;
	}

	tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFS);
	tp->t_flags |= XFS_TRANS_RESERVE;
	if ((error = xfs_trans_reserve(tp, XFS_GROWFS_SPACE_RES(mp),
			XFS_GROWDATA_LOG_RES(mp), 0, 0, 0))) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	/*
	 * Write new AG headers to disk. Non-transactional, but written
	 * synchronously so they are completed prior to the growfs transaction
	 * being logged.
	 */
	nfree = 0;
	for (agno = nagcount - 1; agno >= oagcount; agno--, new -= agsize) {
		/*
		 * AG freelist header block
		 */
		bp = xfs_buf_get(mp->m_ddev_targp,
				 XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
				 XFS_FSS_TO_BB(mp, 1), XBF_LOCK | XBF_MAPPED);
		agf = XFS_BUF_TO_AGF(bp);
		memset(agf, 0, mp->m_sb.sb_sectsize);
		agf->agf_magicnum = cpu_to_be32(XFS_AGF_MAGIC);
		agf->agf_versionnum = cpu_to_be32(XFS_AGF_VERSION);
		agf->agf_seqno = cpu_to_be32(agno);
		if (agno == nagcount - 1)
			agsize =
				nb -
				(agno * (xfs_rfsblock_t)mp->m_sb.sb_agblocks);
		else
			agsize = mp->m_sb.sb_agblocks;
		agf->agf_length = cpu_to_be32(agsize);
		agf->agf_roots[XFS_BTNUM_BNOi] = cpu_to_be32(XFS_BNO_BLOCK(mp));
		agf->agf_roots[XFS_BTNUM_CNTi] = cpu_to_be32(XFS_CNT_BLOCK(mp));
		agf->agf_levels[XFS_BTNUM_BNOi] = cpu_to_be32(1);
		agf->agf_levels[XFS_BTNUM_CNTi] = cpu_to_be32(1);
		agf->agf_flfirst = 0;
		agf->agf_fllast = cpu_to_be32(XFS_AGFL_SIZE(mp) - 1);
		agf->agf_flcount = 0;
		tmpsize = agsize - XFS_PREALLOC_BLOCKS(mp);
		agf->agf_freeblks = cpu_to_be32(tmpsize);
		agf->agf_longest = cpu_to_be32(tmpsize);
		error = xfs_bwrite(mp, bp);
		if (error) {
			goto error0;
		}
		/*
		 * AG inode header block
		 */
		bp = xfs_buf_get(mp->m_ddev_targp,
				 XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)),
				 XFS_FSS_TO_BB(mp, 1), XBF_LOCK | XBF_MAPPED);
		agi = XFS_BUF_TO_AGI(bp);
		memset(agi, 0, mp->m_sb.sb_sectsize);
		agi->agi_magicnum = cpu_to_be32(XFS_AGI_MAGIC);
		agi->agi_versionnum = cpu_to_be32(XFS_AGI_VERSION);
		agi->agi_seqno = cpu_to_be32(agno);
		agi->agi_length = cpu_to_be32(agsize);
		agi->agi_count = 0;
		agi->agi_root = cpu_to_be32(XFS_IBT_BLOCK(mp));
		agi->agi_level = cpu_to_be32(1);
		agi->agi_freecount = 0;
		agi->agi_newino = cpu_to_be32(NULLAGINO);
		agi->agi_dirino = cpu_to_be32(NULLAGINO);
		for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++)
			agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
		error = xfs_bwrite(mp, bp);
		if (error) {
			goto error0;
		}
		/*
		 * BNO btree root block
		 */
		bp = xfs_buf_get(mp->m_ddev_targp,
				 XFS_AGB_TO_DADDR(mp, agno, XFS_BNO_BLOCK(mp)),
				 BTOBB(mp->m_sb.sb_blocksize),
				 XBF_LOCK | XBF_MAPPED);
		block = XFS_BUF_TO_BLOCK(bp);
		memset(block, 0, mp->m_sb.sb_blocksize);
		block->bb_magic = cpu_to_be32(XFS_ABTB_MAGIC);
		block->bb_level = 0;
		block->bb_numrecs = cpu_to_be16(1);
		block->bb_u.s.bb_leftsib = cpu_to_be32(NULLAGBLOCK);
		block->bb_u.s.bb_rightsib = cpu_to_be32(NULLAGBLOCK);
		arec = XFS_ALLOC_REC_ADDR(mp, block, 1);
		arec->ar_startblock = cpu_to_be32(XFS_PREALLOC_BLOCKS(mp));
		arec->ar_blockcount = cpu_to_be32(
			agsize - be32_to_cpu(arec->ar_startblock));
		error = xfs_bwrite(mp, bp);
		if (error) {
			goto error0;
		}
		/*
		 * CNT btree root block
		 */
		bp = xfs_buf_get(mp->m_ddev_targp,
				 XFS_AGB_TO_DADDR(mp, agno, XFS_CNT_BLOCK(mp)),
				 BTOBB(mp->m_sb.sb_blocksize),
				 XBF_LOCK | XBF_MAPPED);
		block = XFS_BUF_TO_BLOCK(bp);
		memset(block, 0, mp->m_sb.sb_blocksize);
		block->bb_magic = cpu_to_be32(XFS_ABTC_MAGIC);
		block->bb_level = 0;
		block->bb_numrecs = cpu_to_be16(1);
		block->bb_u.s.bb_leftsib = cpu_to_be32(NULLAGBLOCK);
		block->bb_u.s.bb_rightsib = cpu_to_be32(NULLAGBLOCK);
		arec = XFS_ALLOC_REC_ADDR(mp, block, 1);
		arec->ar_startblock = cpu_to_be32(XFS_PREALLOC_BLOCKS(mp));
		arec->ar_blockcount = cpu_to_be32(
			agsize - be32_to_cpu(arec->ar_startblock));
		nfree += be32_to_cpu(arec->ar_blockcount);
		error = xfs_bwrite(mp, bp);
		if (error) {
			goto error0;
		}
		/*
		 * INO btree root block
		 */
		bp = xfs_buf_get(mp->m_ddev_targp,
				 XFS_AGB_TO_DADDR(mp, agno, XFS_IBT_BLOCK(mp)),
				 BTOBB(mp->m_sb.sb_blocksize),
				 XBF_LOCK | XBF_MAPPED);
		block = XFS_BUF_TO_BLOCK(bp);
		memset(block, 0, mp->m_sb.sb_blocksize);
		block->bb_magic = cpu_to_be32(XFS_IBT_MAGIC);
		block->bb_level = 0;
		block->bb_numrecs = 0;
		block->bb_u.s.bb_leftsib = cpu_to_be32(NULLAGBLOCK);
		block->bb_u.s.bb_rightsib = cpu_to_be32(NULLAGBLOCK);
		error = xfs_bwrite(mp, bp);
		if (error) {
			goto error0;
		}
	}
	xfs_trans_agblocks_delta(tp, nfree);
	/*
	 * There are new blocks in the old last a.g.
	 */
	if (new) {
		/*
		 * Change the agi length.
		 */
		error = xfs_ialloc_read_agi(mp, tp, agno, &bp);
		if (error) {
			goto error0;
		}
		ASSERT(bp);
		agi = XFS_BUF_TO_AGI(bp);
		be32_add_cpu(&agi->agi_length, new);
		ASSERT(nagcount == oagcount ||
		       be32_to_cpu(agi->agi_length) == mp->m_sb.sb_agblocks);
		xfs_ialloc_log_agi(tp, bp, XFS_AGI_LENGTH);
		/*
		 * Change agf length.
		 */
		error = xfs_alloc_read_agf(mp, tp, agno, 0, &bp);
		if (error) {
			goto error0;
		}
		ASSERT(bp);
		agf = XFS_BUF_TO_AGF(bp);
		be32_add_cpu(&agf->agf_length, new);
		ASSERT(be32_to_cpu(agf->agf_length) ==
		       be32_to_cpu(agi->agi_length));

		xfs_alloc_log_agf(tp, bp, XFS_AGF_LENGTH);
		/*
		 * Free the new space.
		 */
		error = xfs_free_extent(tp, XFS_AGB_TO_FSB(mp, agno,
			be32_to_cpu(agf->agf_length) - new), new);
		if (error) {
			goto error0;
		}
	}

	/*
	 * Update changed superblock fields transactionally. These are not
	 * seen by the rest of the world until the transaction commit applies
	 * them atomically to the superblock.
	 */
	if (nagcount > oagcount)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_AGCOUNT, nagcount - oagcount);
	if (nb > mp->m_sb.sb_dblocks)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_DBLOCKS,
				 nb - mp->m_sb.sb_dblocks);
	if (nfree)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, nfree);
	if (dpct)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_IMAXPCT, dpct);
	error = xfs_trans_commit(tp, 0);
	if (error)
		return error;

	/* New allocation groups fully initialized, so update mount struct */
	if (nagimax)
		mp->m_maxagi = nagimax;
	if (mp->m_sb.sb_imax_pct) {
		__uint64_t icount = mp->m_sb.sb_dblocks * mp->m_sb.sb_imax_pct;
		do_div(icount, 100);
		mp->m_maxicount = icount << mp->m_sb.sb_inopblog;
	} else
		mp->m_maxicount = 0;

	/* update secondary superblocks. */
	for (agno = 1; agno < nagcount; agno++) {
		error = xfs_read_buf(mp, mp->m_ddev_targp,
				  XFS_AGB_TO_DADDR(mp, agno, XFS_SB_BLOCK(mp)),
				  XFS_FSS_TO_BB(mp, 1), 0, &bp);
		if (error) {
			xfs_fs_cmn_err(CE_WARN, mp,
			"error %d reading secondary superblock for ag %d",
				error, agno);
			break;
		}
		xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb, XFS_SB_ALL_BITS);
		/*
		 * If we get an error writing out the alternate superblocks,
		 * just issue a warning and continue.  The real work is
		 * already done and committed.
		 */
		if (!(error = xfs_bwrite(mp, bp))) {
			continue;
		} else {
			xfs_fs_cmn_err(CE_WARN, mp,
		"write error %d updating secondary superblock for ag %d",
				error, agno);
			break; /* no point in continuing */
		}
	}
	return 0;

 error0:
	xfs_trans_cancel(tp, XFS_TRANS_ABORT);
	return error;
}
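/*
 * Grow the log section of the filesystem.  The request is validated
 * below, but actually resizing or relocating the log is not implemented,
 * so a valid request currently returns ENOSYS.
 */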
static int
xfs_growfs_log_private(
	xfs_mount_t		*mp,	/* mount point for filesystem */
	xfs_growfs_log_t	*in)	/* growfs log input struct */
{
	xfs_extlen_t		nb;

	nb = in->newblocks;
	if (nb < XFS_MIN_LOG_BLOCKS || nb < XFS_B_TO_FSB(mp, XFS_MIN_LOG_BYTES))
		return XFS_ERROR(EINVAL);
	if (nb == mp->m_sb.sb_logblocks &&
	    in->isint == (mp->m_sb.sb_logstart != 0))
		return XFS_ERROR(EINVAL);
	/*
	 * Moving the log is hard, need new interfaces to sync
	 * the log first, hold off all activity while moving it.
	 * Can have shorter or longer log in the same space,
	 * or transform internal to external log or vice versa.
	 */
	return XFS_ERROR(ENOSYS);
}
/*
 * Protected versions of the growfs functions acquire and release locks on the
 * mount point.  They are exported through the ioctls XFS_IOC_FSGROWFSDATA,
 * XFS_IOC_FSGROWFSLOG and XFS_IOC_FSGROWFSRT.
 */
int
xfs_growfs_data(
	xfs_mount_t		*mp,
	xfs_growfs_data_t	*in)
{
	int error;

	if (!capable(CAP_SYS_ADMIN))
		return XFS_ERROR(EPERM);
	if (!mutex_trylock(&mp->m_growlock))
		return XFS_ERROR(EWOULDBLOCK);
	error = xfs_growfs_data_private(mp, in);
	mutex_unlock(&mp->m_growlock);
	return error;
}
int
xfs_growfs_log(
	xfs_mount_t		*mp,
	xfs_growfs_log_t	*in)
{
	int error;

	if (!capable(CAP_SYS_ADMIN))
		return XFS_ERROR(EPERM);
	if (!mutex_trylock(&mp->m_growlock))
		return XFS_ERROR(EWOULDBLOCK);
	error = xfs_growfs_log_private(mp, in);
	mutex_unlock(&mp->m_growlock);
	return error;
}
/*
 * exported through ioctl XFS_IOC_FSCOUNTS
 */
int
xfs_fs_counts(
	xfs_mount_t		*mp,
	xfs_fsop_counts_t	*cnt)
{
	xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT);
	spin_lock(&mp->m_sb_lock);
	cnt->freedata = mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
	cnt->freertx = mp->m_sb.sb_frextents;
	cnt->freeino = mp->m_sb.sb_ifree;
	cnt->allocino = mp->m_sb.sb_icount;
	spin_unlock(&mp->m_sb_lock);
	return 0;
}
/*
 * exported through ioctl XFS_IOC_SET_RESBLKS & XFS_IOC_GET_RESBLKS
 *
 * xfs_reserve_blocks is called to set m_resblks
 * in the in-core mount table. The number of unused reserved blocks
 * is kept in m_resblks_avail.
 *
 * Reserve the requested number of blocks if available. Otherwise return
 * as many as possible to satisfy the request. The actual number
 * reserved is returned in outval.
 *
 * A null inval pointer indicates that only the current reserved blocks
 * available should be returned; no settings are changed.
 */
int
xfs_reserve_blocks(
	xfs_mount_t		*mp,
	__uint64_t		*inval,
	xfs_fsop_resblks_t	*outval)
{
	__int64_t		lcounter, delta, fdblks_delta;
	__uint64_t		request;

	/* If inval is null, report current values and return */
	if (inval == (__uint64_t *)NULL) {
		if (!outval)
			return EINVAL;
		outval->resblks = mp->m_resblks;
		outval->resblks_avail = mp->m_resblks_avail;
		return 0;
	}

	request = *inval;

	/*
	 * With per-cpu counters, this becomes an interesting
	 * problem. We need to work out if we are freeing or allocating
	 * blocks first, then we can do the modification as necessary.
	 *
	 * We do this under the m_sb_lock so that if we are near
	 * ENOSPC, we will hold out any changes while we work out
	 * what to do. This means that the amount of free space can
	 * change while we do this, so we need to retry if we end up
	 * trying to reserve more space than is available.
	 *
	 * We also use the xfs_mod_incore_sb() interface so that we
	 * don't have to care about whether per-cpu counters are
	 * enabled, disabled or even compiled in....
	 */
retry:
	spin_lock(&mp->m_sb_lock);
	xfs_icsb_sync_counters_locked(mp, 0);

	/*
	 * If our previous reservation was larger than the current value,
	 * then move any unused blocks back to the free pool.
	 */
	fdblks_delta = 0;
	if (mp->m_resblks > request) {
		lcounter = mp->m_resblks_avail - request;
		if (lcounter > 0) {		/* release unused blocks */
			fdblks_delta = lcounter;
			mp->m_resblks_avail -= lcounter;
		}
		mp->m_resblks = request;
	} else {
		__int64_t	free;

		free = mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
		if (!free)
			goto out; /* ENOSPC and fdblks_delta = 0 */

		delta = request - mp->m_resblks;
		lcounter = free - delta;
		if (lcounter < 0) {
			/* We can't satisfy the request, just get what we can */
			mp->m_resblks += free;
			mp->m_resblks_avail += free;
			fdblks_delta = -free;
		} else {
			fdblks_delta = -delta;
			mp->m_resblks = request;
			mp->m_resblks_avail += delta;
		}
	}
out:
	if (outval) {
		outval->resblks = mp->m_resblks;
		outval->resblks_avail = mp->m_resblks_avail;
	}
	spin_unlock(&mp->m_sb_lock);

	if (fdblks_delta) {
		/*
		 * If we are putting blocks back here, m_resblks_avail is
		 * already at its max so this will put it in the free pool.
		 *
		 * If we need space, we'll either succeed in getting it
		 * from the free block count or we'll get an ENOSPC. If
		 * we get an ENOSPC, it means things changed while we were
		 * calculating fdblks_delta and so we should try again to
		 * see if there is anything left to reserve.
		 *
		 * Don't set the reserved flag here - we don't want to reserve
		 * the extra reserve blocks from the reserve.....
		 */
		int error;
		error = xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS,
						fdblks_delta, 0);
		if (error == ENOSPC)
			goto retry;
	}
	return 0;
}
/*
 * Dump a transaction into the log that contains no real change. This is needed
 * to be able to make the log dirty or stamp the current tail LSN into the log
 * during the covering operation.
 *
 * We cannot use an inode here for this - that will push dirty state back up
 * into the VFS and then periodic inode flushing will prevent log covering from
 * making progress. Hence we log a field in the superblock instead.
 */
int
xfs_fs_log_dummy(
	xfs_mount_t	*mp,
	int		flags)
{
	xfs_trans_t	*tp;
	int		error;

	tp = _xfs_trans_alloc(mp, XFS_TRANS_DUMMY1, KM_SLEEP);
	error = xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0,
					XFS_DEFAULT_LOG_COUNT);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	/* log the UUID because it is an unchanging field */
	xfs_mod_sb(tp, XFS_SB_UUID);
	if (flags & SYNC_WAIT)
		xfs_trans_set_sync(tp);
	return xfs_trans_commit(tp, 0);
}
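/*
 * Force a shutdown of the filesystem, exported through the
 * XFS_IOC_GOINGDOWN ioctl.  The flags select whether the data device is
 * frozen first and whether the log is flushed as part of the shutdown.
 */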
int
xfs_fs_goingdown(
	xfs_mount_t	*mp,
	__uint32_t	inflags)
{
	switch (inflags) {
	case XFS_FSOP_GOING_FLAGS_DEFAULT: {
		struct super_block *sb = freeze_bdev(mp->m_super->s_bdev);

		if (sb && !IS_ERR(sb)) {
			xfs_force_shutdown(mp, SHUTDOWN_FORCE_UMOUNT);
			thaw_bdev(sb->s_bdev, sb);
		}

		break;
	}
	case XFS_FSOP_GOING_FLAGS_LOGFLUSH:
		xfs_force_shutdown(mp, SHUTDOWN_FORCE_UMOUNT);
		break;
	case XFS_FSOP_GOING_FLAGS_NOLOGFLUSH:
		xfs_force_shutdown(mp,
				SHUTDOWN_FORCE_UMOUNT | SHUTDOWN_LOG_IO_ERROR);
		break;
	default:
		return XFS_ERROR(EINVAL);
	}

	return 0;
}