// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * Copyright (C) 2010 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_extent_busy.h"
#include "xfs_quota.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_trace.h"
#include "xfs_error.h"
#include "xfs_defer.h"

kmem_zone_t     *xfs_trans_zone;

#if defined(CONFIG_TRACEPOINTS)
static void
xfs_trans_trace_reservations(
        struct xfs_mount        *mp)
{
        struct xfs_trans_res    resv;
        struct xfs_trans_res    *res;
        struct xfs_trans_res    *end_res;
        int                     i;

        res = (struct xfs_trans_res *)M_RES(mp);
        end_res = (struct xfs_trans_res *)(M_RES(mp) + 1);
        for (i = 0; res < end_res; i++, res++)
                trace_xfs_trans_resv_calc(mp, i, res);
        xfs_log_get_max_trans_res(mp, &resv);
        trace_xfs_trans_resv_calc(mp, -1, &resv);
}
#else
# define xfs_trans_trace_reservations(mp)
#endif

/*
 * Initialize the precomputed transaction reservation values
 * in the mount structure.
 */
void
xfs_trans_init(
        struct xfs_mount        *mp)
{
        xfs_trans_resv_calc(mp, M_RES(mp));
        xfs_trans_trace_reservations(mp);
}

/*
 * Free the transaction structure.  If there is more clean up
 * to do when the structure is freed, add it here.
 */
STATIC void
xfs_trans_free(
        struct xfs_trans        *tp)
{
        xfs_extent_busy_sort(&tp->t_busy);
        xfs_extent_busy_clear(tp->t_mountp, &tp->t_busy, false);

        trace_xfs_trans_free(tp, _RET_IP_);
        atomic_dec(&tp->t_mountp->m_active_trans);
        if (!(tp->t_flags & XFS_TRANS_NO_WRITECOUNT))
                sb_end_intwrite(tp->t_mountp->m_super);
        xfs_trans_free_dqinfo(tp);
        kmem_zone_free(xfs_trans_zone, tp);
}

/*
 * This is called to create a new transaction which will share the
 * permanent log reservation of the given transaction.  The remaining
 * unused block and rt extent reservations are also inherited.  This
 * implies that the original transaction is no longer allowed to allocate
 * blocks.  Locks and log items, however, are not inherited.  They must
 * be added to the new transaction explicitly.
 */
STATIC struct xfs_trans *
xfs_trans_dup(
        struct xfs_trans        *tp)
{
        struct xfs_trans        *ntp;

        trace_xfs_trans_dup(tp, _RET_IP_);

        ntp = kmem_zone_zalloc(xfs_trans_zone, KM_SLEEP);

        /*
         * Initialize the new transaction structure.
         */
        ntp->t_magic = XFS_TRANS_HEADER_MAGIC;
        ntp->t_mountp = tp->t_mountp;
        INIT_LIST_HEAD(&ntp->t_items);
        INIT_LIST_HEAD(&ntp->t_busy);

        ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
        ASSERT(tp->t_ticket != NULL);

        ntp->t_flags = XFS_TRANS_PERM_LOG_RES |
                       (tp->t_flags & XFS_TRANS_RESERVE) |
                       (tp->t_flags & XFS_TRANS_NO_WRITECOUNT);
        /* We gave our writer reference to the new transaction */
        tp->t_flags |= XFS_TRANS_NO_WRITECOUNT;
        ntp->t_ticket = xfs_log_ticket_get(tp->t_ticket);

        ASSERT(tp->t_blk_res >= tp->t_blk_res_used);
        ntp->t_blk_res = tp->t_blk_res - tp->t_blk_res_used;
        tp->t_blk_res = tp->t_blk_res_used;

        ntp->t_rtx_res = tp->t_rtx_res - tp->t_rtx_res_used;
        tp->t_rtx_res = tp->t_rtx_res_used;
        ntp->t_pflags = tp->t_pflags;
        ntp->t_agfl_dfops = tp->t_agfl_dfops;

        xfs_trans_dup_dqinfo(tp, ntp);

        atomic_inc(&tp->t_mountp->m_active_trans);
        return ntp;
}

/*
 * This is called to reserve free disk blocks and log space for the
 * given transaction.  This must be done before allocating any resources
 * within the transaction.
 *
 * This will return ENOSPC if there are not enough blocks available.
 * It will sleep waiting for available log space.
 * The only valid value for the flags parameter is XFS_RES_LOG_PERM, which
 * is used by long running transactions.  If any one of the reservations
 * fails then they will all be backed out.
 *
 * This does not do quota reservations.  That typically is done by the
 * caller afterwards.
 */
static int
xfs_trans_reserve(
        struct xfs_trans        *tp,
        struct xfs_trans_res    *resp,
        uint                    blocks,
        uint                    rtextents)
{
        int                     error = 0;
        bool                    rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;

        /* Mark this thread as being in a transaction */
        current_set_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);

        /*
         * Attempt to reserve the needed disk blocks by decrementing
         * the number needed from the number available.  This will
         * fail if the count would go below zero.
         */
        if (blocks > 0) {
                error = xfs_mod_fdblocks(tp->t_mountp, -((int64_t)blocks), rsvd);
                if (error != 0) {
                        current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
                        return -ENOSPC;
                }
                tp->t_blk_res += blocks;
        }

        /*
         * Reserve the log space needed for this transaction.
         */
        if (resp->tr_logres > 0) {
                bool    permanent = false;

                ASSERT(tp->t_log_res == 0 ||
                       tp->t_log_res == resp->tr_logres);
                ASSERT(tp->t_log_count == 0 ||
                       tp->t_log_count == resp->tr_logcount);

                if (resp->tr_logflags & XFS_TRANS_PERM_LOG_RES) {
                        tp->t_flags |= XFS_TRANS_PERM_LOG_RES;
                        permanent = true;
                } else {
                        ASSERT(tp->t_ticket == NULL);
                        ASSERT(!(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
                }

                if (tp->t_ticket != NULL) {
                        ASSERT(resp->tr_logflags & XFS_TRANS_PERM_LOG_RES);
                        error = xfs_log_regrant(tp->t_mountp, tp->t_ticket);
                } else {
                        error = xfs_log_reserve(tp->t_mountp,
                                                resp->tr_logres,
                                                resp->tr_logcount,
                                                &tp->t_ticket, XFS_TRANSACTION,
                                                permanent);
                }

                if (error)
                        goto undo_blocks;

                tp->t_log_res = resp->tr_logres;
                tp->t_log_count = resp->tr_logcount;
        }

        /*
         * Attempt to reserve the needed realtime extents by decrementing
         * the number needed from the number available.  This will
         * fail if the count would go below zero.
         */
        if (rtextents > 0) {
                error = xfs_mod_frextents(tp->t_mountp, -((int64_t)rtextents));
                if (error) {
                        error = -ENOSPC;
                        goto undo_log;
                }
                tp->t_rtx_res += rtextents;
        }

        return 0;

        /*
         * Error cases jump to one of these labels to undo any
         * reservations which have already been performed.
         */
undo_log:
        if (resp->tr_logres > 0) {
                xfs_log_done(tp->t_mountp, tp->t_ticket, NULL, false);
                tp->t_ticket = NULL;
                tp->t_log_res = 0;
                tp->t_flags &= ~XFS_TRANS_PERM_LOG_RES;
        }

undo_blocks:
        if (blocks > 0) {
                xfs_mod_fdblocks(tp->t_mountp, (int64_t)blocks, rsvd);
                tp->t_blk_res = 0;
        }

        current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);

        return error;
}

int
xfs_trans_alloc(
        struct xfs_mount        *mp,
        struct xfs_trans_res    *resp,
        uint                    blocks,
        uint                    rtextents,
        uint                    flags,
        struct xfs_trans        **tpp)
{
        struct xfs_trans        *tp;
        int                     error;

        if (!(flags & XFS_TRANS_NO_WRITECOUNT))
                sb_start_intwrite(mp->m_super);

        WARN_ON(mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
        atomic_inc(&mp->m_active_trans);

        tp = kmem_zone_zalloc(xfs_trans_zone,
                (flags & XFS_TRANS_NOFS) ? KM_NOFS : KM_SLEEP);
        tp->t_magic = XFS_TRANS_HEADER_MAGIC;
        tp->t_flags = flags;
        tp->t_mountp = mp;
        INIT_LIST_HEAD(&tp->t_items);
        INIT_LIST_HEAD(&tp->t_busy);

        error = xfs_trans_reserve(tp, resp, blocks, rtextents);
        if (error) {
                xfs_trans_cancel(tp);
                return error;
        }

        trace_xfs_trans_alloc(tp, _RET_IP_);

        *tpp = tp;
        return 0;
}
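
/*
 * Editorial example (not part of the original file): a minimal sketch of
 * the canonical alloc/join/commit pattern for callers of xfs_trans_alloc().
 * The helper name and the choice of the tr_ichange reservation are
 * illustrative assumptions only.
 */
static int __maybe_unused
xfs_example_dirty_inode(
        struct xfs_mount        *mp,
        struct xfs_inode        *ip)
{
        struct xfs_trans        *tp;
        int                     error;

        /* reserve log space (no data blocks) before touching anything */
        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
        if (error)
                return error;   /* nothing to cancel; alloc cleaned up */

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        /* joining hands lock ownership to the transaction */
        xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
        xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

        /* commit (or xfs_trans_cancel() on error) unlocks joined items */
        return xfs_trans_commit(tp);
}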

/*
 * Create an empty transaction with no reservation.  This is a defensive
 * mechanism for routines that query metadata without actually modifying
 * them -- if the metadata being queried is somehow cross-linked (think a
 * btree block pointer that points higher in the tree), we risk deadlock.
 * However, blocks grabbed as part of a transaction can be re-grabbed.
 * The verifiers will notice the corrupt block and the operation will fail
 * back to userspace without deadlocking.
 *
 * Note the zero-length reservation; this transaction MUST be cancelled
 * without any dirty data.
 */
int
xfs_trans_alloc_empty(
        struct xfs_mount        *mp,
        struct xfs_trans        **tpp)
{
        struct xfs_trans_res    resv = {0};

        return xfs_trans_alloc(mp, &resv, 0, 0, XFS_TRANS_NO_WRITECOUNT, tpp);
}
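
/*
 * Editorial example (not part of the original file): a sketch of how a
 * read-only query path might use an empty transaction.  The helper name
 * and its body are assumptions for demonstration only.
 */
static int __maybe_unused
xfs_example_query_metadata(
        struct xfs_mount        *mp,
        struct xfs_inode        *ip)
{
        struct xfs_trans        *tp;
        int                     error;

        error = xfs_trans_alloc_empty(mp, &tp);
        if (error)
                return error;

        xfs_ilock(ip, XFS_ILOCK_SHARED);
        /* ... walk metadata; buffers joined here can be re-grabbed ... */
        xfs_iunlock(ip, XFS_ILOCK_SHARED);

        /* an empty transaction must be cancelled, never committed dirty */
        xfs_trans_cancel(tp);
        return 0;
}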

/*
 * Record the indicated change to the given field for application
 * to the file system's superblock when the transaction commits.
 * For now, just store the change in the transaction structure.
 *
 * Mark the transaction structure to indicate that the superblock
 * needs to be updated before committing.
 *
 * Because we may not be keeping track of allocated/free inodes and
 * used filesystem blocks in the superblock, we do not mark the
 * superblock dirty in this transaction if we modify these fields.
 * We still need to update the transaction deltas so that they get
 * applied to the incore superblock, but we don't want them to
 * cause the superblock to get locked and logged if these are the
 * only fields in the superblock that the transaction modifies.
 */
void
xfs_trans_mod_sb(
        xfs_trans_t     *tp,
        uint            field,
        int64_t         delta)
{
        uint32_t        flags = (XFS_TRANS_DIRTY|XFS_TRANS_SB_DIRTY);
        xfs_mount_t     *mp = tp->t_mountp;

        switch (field) {
        case XFS_TRANS_SB_ICOUNT:
                tp->t_icount_delta += delta;
                if (xfs_sb_version_haslazysbcount(&mp->m_sb))
                        flags &= ~XFS_TRANS_SB_DIRTY;
                break;
        case XFS_TRANS_SB_IFREE:
                tp->t_ifree_delta += delta;
                if (xfs_sb_version_haslazysbcount(&mp->m_sb))
                        flags &= ~XFS_TRANS_SB_DIRTY;
                break;
        case XFS_TRANS_SB_FDBLOCKS:
                /*
                 * Track the number of blocks allocated in the transaction.
                 * Make sure it does not exceed the number reserved.  If so,
                 * shutdown as this can lead to accounting inconsistency.
                 */
                if (delta < 0) {
                        tp->t_blk_res_used += (uint)-delta;
                        if (tp->t_blk_res_used > tp->t_blk_res)
                                xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
                }
                tp->t_fdblocks_delta += delta;
                if (xfs_sb_version_haslazysbcount(&mp->m_sb))
                        flags &= ~XFS_TRANS_SB_DIRTY;
                break;
        case XFS_TRANS_SB_RES_FDBLOCKS:
                /*
                 * The allocation has already been applied to the
                 * in-core superblock's counter.  This should only
                 * be applied to the on-disk superblock.
                 */
                tp->t_res_fdblocks_delta += delta;
                if (xfs_sb_version_haslazysbcount(&mp->m_sb))
                        flags &= ~XFS_TRANS_SB_DIRTY;
                break;
        case XFS_TRANS_SB_FREXTENTS:
                /*
                 * Track the number of blocks allocated in the
                 * transaction.  Make sure it does not exceed the
                 * number reserved.
                 */
                if (delta < 0) {
                        tp->t_rtx_res_used += (uint)-delta;
                        ASSERT(tp->t_rtx_res_used <= tp->t_rtx_res);
                }
                tp->t_frextents_delta += delta;
                break;
        case XFS_TRANS_SB_RES_FREXTENTS:
                /*
                 * The allocation has already been applied to the
                 * in-core superblock's counter.  This should only
                 * be applied to the on-disk superblock.
                 */
                ASSERT(delta < 0);
                tp->t_res_frextents_delta += delta;
                break;
        case XFS_TRANS_SB_DBLOCKS:
                ASSERT(delta > 0);
                tp->t_dblocks_delta += delta;
                break;
        case XFS_TRANS_SB_AGCOUNT:
                ASSERT(delta > 0);
                tp->t_agcount_delta += delta;
                break;
        case XFS_TRANS_SB_IMAXPCT:
                tp->t_imaxpct_delta += delta;
                break;
        case XFS_TRANS_SB_REXTSIZE:
                tp->t_rextsize_delta += delta;
                break;
        case XFS_TRANS_SB_RBMBLOCKS:
                tp->t_rbmblocks_delta += delta;
                break;
        case XFS_TRANS_SB_RBLOCKS:
                tp->t_rblocks_delta += delta;
                break;
        case XFS_TRANS_SB_REXTENTS:
                tp->t_rextents_delta += delta;
                break;
        case XFS_TRANS_SB_REXTSLOG:
                tp->t_rextslog_delta += delta;
                break;
        default:
                ASSERT(0);
                return;
        }

        tp->t_flags |= flags;
}
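
/*
 * Editorial example (not part of the original file): a sketch of how an
 * allocator reports a free-space counter change.  Allocating "len" blocks
 * shrinks free space, so the delta is negative; freeing uses a positive
 * delta.  The helper name is an assumption for demonstration only.
 */
static void __maybe_unused
xfs_example_account_blocks(
        struct xfs_trans        *tp,
        xfs_extlen_t            len,
        bool                    freeing)
{
        int64_t                 delta = freeing ? (int64_t)len : -(int64_t)len;

        xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, delta);
}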

/*
 * xfs_trans_apply_sb_deltas() is called from the commit code
 * to bring the superblock buffer into the current transaction
 * and modify it as requested by earlier calls to xfs_trans_mod_sb().
 *
 * For now we just look at each field allowed to change and change
 * it if necessary.
 */
STATIC void
xfs_trans_apply_sb_deltas(
        xfs_trans_t     *tp)
{
        xfs_dsb_t       *sbp;
        xfs_buf_t       *bp;
        int             whole = 0;

        bp = xfs_trans_getsb(tp, tp->t_mountp, 0);
        sbp = XFS_BUF_TO_SBP(bp);

        /*
         * Check that superblock mods match the mods made to AGF counters.
         */
        ASSERT((tp->t_fdblocks_delta + tp->t_res_fdblocks_delta) ==
               (tp->t_ag_freeblks_delta + tp->t_ag_flist_delta +
                tp->t_ag_btree_delta));

        /*
         * Only update the superblock counters if we are logging them
         */
        if (!xfs_sb_version_haslazysbcount(&(tp->t_mountp->m_sb))) {
                if (tp->t_icount_delta)
                        be64_add_cpu(&sbp->sb_icount, tp->t_icount_delta);
                if (tp->t_ifree_delta)
                        be64_add_cpu(&sbp->sb_ifree, tp->t_ifree_delta);
                if (tp->t_fdblocks_delta)
                        be64_add_cpu(&sbp->sb_fdblocks, tp->t_fdblocks_delta);
                if (tp->t_res_fdblocks_delta)
                        be64_add_cpu(&sbp->sb_fdblocks, tp->t_res_fdblocks_delta);
        }

        if (tp->t_frextents_delta)
                be64_add_cpu(&sbp->sb_frextents, tp->t_frextents_delta);
        if (tp->t_res_frextents_delta)
                be64_add_cpu(&sbp->sb_frextents, tp->t_res_frextents_delta);

        if (tp->t_dblocks_delta) {
                be64_add_cpu(&sbp->sb_dblocks, tp->t_dblocks_delta);
                whole = 1;
        }
        if (tp->t_agcount_delta) {
                be32_add_cpu(&sbp->sb_agcount, tp->t_agcount_delta);
                whole = 1;
        }
        if (tp->t_imaxpct_delta) {
                sbp->sb_imax_pct += tp->t_imaxpct_delta;
                whole = 1;
        }
        if (tp->t_rextsize_delta) {
                be32_add_cpu(&sbp->sb_rextsize, tp->t_rextsize_delta);
                whole = 1;
        }
        if (tp->t_rbmblocks_delta) {
                be32_add_cpu(&sbp->sb_rbmblocks, tp->t_rbmblocks_delta);
                whole = 1;
        }
        if (tp->t_rblocks_delta) {
                be64_add_cpu(&sbp->sb_rblocks, tp->t_rblocks_delta);
                whole = 1;
        }
        if (tp->t_rextents_delta) {
                be64_add_cpu(&sbp->sb_rextents, tp->t_rextents_delta);
                whole = 1;
        }
        if (tp->t_rextslog_delta) {
                sbp->sb_rextslog += tp->t_rextslog_delta;
                whole = 1;
        }

        xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
        if (whole)
                /*
                 * Log the whole thing, the fields are noncontiguous.
                 */
                xfs_trans_log_buf(tp, bp, 0, sizeof(xfs_dsb_t) - 1);
        else
                /*
                 * Since all the modifiable fields are contiguous, we
                 * can get away with this.
                 */
                xfs_trans_log_buf(tp, bp, offsetof(xfs_dsb_t, sb_icount),
                                  offsetof(xfs_dsb_t, sb_frextents) +
                                  sizeof(sbp->sb_frextents) - 1);
}

STATIC int
xfs_sb_mod8(
        uint8_t                 *field,
        int8_t                  delta)
{
        int8_t                  counter = *field;

        counter += delta;
        if (counter < 0) {
                ASSERT(0);
                return -EINVAL;
        }
        *field = counter;
        return 0;
}

STATIC int
xfs_sb_mod32(
        uint32_t                *field,
        int32_t                 delta)
{
        int32_t                 counter = *field;

        counter += delta;
        if (counter < 0) {
                ASSERT(0);
                return -EINVAL;
        }
        *field = counter;
        return 0;
}

STATIC int
xfs_sb_mod64(
        uint64_t                *field,
        int64_t                 delta)
{
        int64_t                 counter = *field;

        counter += delta;
        if (counter < 0) {
                ASSERT(0);
                return -EINVAL;
        }
        *field = counter;
        return 0;
}

/*
 * xfs_trans_unreserve_and_mod_sb() is called to release unused reservations
 * and apply superblock counter changes to the in-core superblock.  The
 * t_res_fdblocks_delta and t_res_frextents_delta fields are explicitly NOT
 * applied to the in-core superblock because that has already been done.
 *
 * If we are not logging superblock counters, then the inode allocated/free and
 * used block counts are not updated in the on disk superblock. In this case,
 * XFS_TRANS_SB_DIRTY will not be set when the transaction is updated but we
 * still need to update the incore superblock with the changes.
 */
void
xfs_trans_unreserve_and_mod_sb(
        struct xfs_trans        *tp)
{
        struct xfs_mount        *mp = tp->t_mountp;
        bool                    rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
        int64_t                 blkdelta = 0;
        int64_t                 rtxdelta = 0;
        int64_t                 idelta = 0;
        int64_t                 ifreedelta = 0;
        int                     error;

        /* calculate deltas */
        if (tp->t_blk_res > 0)
                blkdelta = tp->t_blk_res;
        if ((tp->t_fdblocks_delta != 0) &&
            (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
             (tp->t_flags & XFS_TRANS_SB_DIRTY)))
                blkdelta += tp->t_fdblocks_delta;

        if (tp->t_rtx_res > 0)
                rtxdelta = tp->t_rtx_res;
        if ((tp->t_frextents_delta != 0) &&
            (tp->t_flags & XFS_TRANS_SB_DIRTY))
                rtxdelta += tp->t_frextents_delta;

        if (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
            (tp->t_flags & XFS_TRANS_SB_DIRTY)) {
                idelta = tp->t_icount_delta;
                ifreedelta = tp->t_ifree_delta;
        }

        /* apply the per-cpu counters */
        if (blkdelta) {
                error = xfs_mod_fdblocks(mp, blkdelta, rsvd);
                if (error)
                        goto out;
        }

        if (idelta) {
                error = xfs_mod_icount(mp, idelta);
                if (error)
                        goto out_undo_fdblocks;
        }

        if (ifreedelta) {
                error = xfs_mod_ifree(mp, ifreedelta);
                if (error)
                        goto out_undo_icount;
        }

        if (rtxdelta == 0 && !(tp->t_flags & XFS_TRANS_SB_DIRTY))
                return;

        /* apply remaining deltas */
        spin_lock(&mp->m_sb_lock);
        if (rtxdelta) {
                error = xfs_sb_mod64(&mp->m_sb.sb_frextents, rtxdelta);
                if (error)
                        goto out_undo_ifree;
        }

        if (tp->t_dblocks_delta != 0) {
                error = xfs_sb_mod64(&mp->m_sb.sb_dblocks, tp->t_dblocks_delta);
                if (error)
                        goto out_undo_frextents;
        }
        if (tp->t_agcount_delta != 0) {
                error = xfs_sb_mod32(&mp->m_sb.sb_agcount, tp->t_agcount_delta);
                if (error)
                        goto out_undo_dblocks;
        }
        if (tp->t_imaxpct_delta != 0) {
                error = xfs_sb_mod8(&mp->m_sb.sb_imax_pct, tp->t_imaxpct_delta);
                if (error)
                        goto out_undo_agcount;
        }
        if (tp->t_rextsize_delta != 0) {
                error = xfs_sb_mod32(&mp->m_sb.sb_rextsize,
                                     tp->t_rextsize_delta);
                if (error)
                        goto out_undo_imaxpct;
        }
        if (tp->t_rbmblocks_delta != 0) {
                error = xfs_sb_mod32(&mp->m_sb.sb_rbmblocks,
                                     tp->t_rbmblocks_delta);
                if (error)
                        goto out_undo_rextsize;
        }
        if (tp->t_rblocks_delta != 0) {
                error = xfs_sb_mod64(&mp->m_sb.sb_rblocks, tp->t_rblocks_delta);
                if (error)
                        goto out_undo_rbmblocks;
        }
        if (tp->t_rextents_delta != 0) {
                error = xfs_sb_mod64(&mp->m_sb.sb_rextents,
                                     tp->t_rextents_delta);
                if (error)
                        goto out_undo_rblocks;
        }
        if (tp->t_rextslog_delta != 0) {
                error = xfs_sb_mod8(&mp->m_sb.sb_rextslog,
                                    tp->t_rextslog_delta);
                if (error)
                        goto out_undo_rextents;
        }
        spin_unlock(&mp->m_sb_lock);
        return;

out_undo_rextents:
        if (tp->t_rextents_delta)
                xfs_sb_mod64(&mp->m_sb.sb_rextents, -tp->t_rextents_delta);
out_undo_rblocks:
        if (tp->t_rblocks_delta)
                xfs_sb_mod64(&mp->m_sb.sb_rblocks, -tp->t_rblocks_delta);
out_undo_rbmblocks:
        if (tp->t_rbmblocks_delta)
                xfs_sb_mod32(&mp->m_sb.sb_rbmblocks, -tp->t_rbmblocks_delta);
out_undo_rextsize:
        if (tp->t_rextsize_delta)
                xfs_sb_mod32(&mp->m_sb.sb_rextsize, -tp->t_rextsize_delta);
out_undo_imaxpct:
        if (tp->t_imaxpct_delta)
                xfs_sb_mod8(&mp->m_sb.sb_imax_pct, -tp->t_imaxpct_delta);
out_undo_agcount:
        if (tp->t_agcount_delta)
                xfs_sb_mod32(&mp->m_sb.sb_agcount, -tp->t_agcount_delta);
out_undo_dblocks:
        if (tp->t_dblocks_delta)
                xfs_sb_mod64(&mp->m_sb.sb_dblocks, -tp->t_dblocks_delta);
out_undo_frextents:
        if (rtxdelta)
                xfs_sb_mod64(&mp->m_sb.sb_frextents, -rtxdelta);
out_undo_ifree:
        spin_unlock(&mp->m_sb_lock);
        if (ifreedelta)
                xfs_mod_ifree(mp, -ifreedelta);
out_undo_icount:
        if (idelta)
                xfs_mod_icount(mp, -idelta);
out_undo_fdblocks:
        if (blkdelta)
                xfs_mod_fdblocks(mp, -blkdelta, rsvd);
out:
        ASSERT(error == 0);
        return;
}

/* Add the given log item to the transaction's list of log items. */
void
xfs_trans_add_item(
        struct xfs_trans        *tp,
        struct xfs_log_item     *lip)
{
        ASSERT(lip->li_mountp == tp->t_mountp);
        ASSERT(lip->li_ailp == tp->t_mountp->m_ail);
        ASSERT(list_empty(&lip->li_trans));
        ASSERT(!test_bit(XFS_LI_DIRTY, &lip->li_flags));

        list_add_tail(&lip->li_trans, &tp->t_items);
        trace_xfs_trans_add_item(tp, _RET_IP_);
}

/*
 * Unlink the log item from the transaction.  The log item is no longer
 * considered dirty in this transaction, as the linked transaction has
 * finished, either by abort or commit completion.
 */
void
xfs_trans_del_item(
        struct xfs_log_item     *lip)
{
        clear_bit(XFS_LI_DIRTY, &lip->li_flags);
        list_del_init(&lip->li_trans);
}

/* Detach and unlock all of the items in a transaction */
void
xfs_trans_free_items(
        struct xfs_trans        *tp,
        xfs_lsn_t               commit_lsn,
        bool                    abort)
{
        struct xfs_log_item     *lip, *next;

        trace_xfs_trans_free_items(tp, _RET_IP_);

        list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) {
                xfs_trans_del_item(lip);
                if (commit_lsn != NULLCOMMITLSN)
                        lip->li_ops->iop_committing(lip, commit_lsn);
                if (abort)
                        set_bit(XFS_LI_ABORTED, &lip->li_flags);
                lip->li_ops->iop_unlock(lip);
        }
}

static inline void
xfs_log_item_batch_insert(
        struct xfs_ail          *ailp,
        struct xfs_ail_cursor   *cur,
        struct xfs_log_item     **log_items,
        int                     nr_items,
        xfs_lsn_t               commit_lsn)
{
        int     i;

        spin_lock(&ailp->ail_lock);
        /* xfs_trans_ail_update_bulk drops ailp->ail_lock */
        xfs_trans_ail_update_bulk(ailp, cur, log_items, nr_items, commit_lsn);

        for (i = 0; i < nr_items; i++) {
                struct xfs_log_item *lip = log_items[i];

                lip->li_ops->iop_unpin(lip, 0);
        }
}

/*
 * Bulk operation version of xfs_trans_committed that takes a log vector of
 * items to insert into the AIL. This uses bulk AIL insertion techniques to
 * minimise lock traffic.
 *
 * If we are called with the aborted flag set, it is because a log write during
 * a CIL checkpoint commit has failed. In this case, all the items in the
 * checkpoint have already gone through iop_committed and iop_unlock, which
 * means that checkpoint commit abort handling is treated exactly the same
 * as an iclog write error even though we haven't started any IO yet. Hence in
 * this case all we need to do is iop_committed processing, followed by an
 * iop_unpin(aborted) call.
 *
 * The AIL cursor is used to optimise the insert process. If commit_lsn is not
 * at the end of the AIL, the insert cursor avoids the need to walk
 * the AIL to find the insertion point on every xfs_log_item_batch_insert()
 * call. This saves a lot of needless list walking and is a net win, even
 * though it slightly increases the amount of AIL lock traffic to set it up
 * and tear it down.
 */
void
xfs_trans_committed_bulk(
        struct xfs_ail          *ailp,
        struct xfs_log_vec      *log_vector,
        xfs_lsn_t               commit_lsn,
        int                     aborted)
{
#define LOG_ITEM_BATCH_SIZE     32
        struct xfs_log_item     *log_items[LOG_ITEM_BATCH_SIZE];
        struct xfs_log_vec      *lv;
        struct xfs_ail_cursor   cur;
        int                     i = 0;

        spin_lock(&ailp->ail_lock);
        xfs_trans_ail_cursor_last(ailp, &cur, commit_lsn);
        spin_unlock(&ailp->ail_lock);

        /* unpin all the log items */
        for (lv = log_vector; lv; lv = lv->lv_next) {
                struct xfs_log_item     *lip = lv->lv_item;
                xfs_lsn_t               item_lsn;

                if (aborted)
                        set_bit(XFS_LI_ABORTED, &lip->li_flags);
                item_lsn = lip->li_ops->iop_committed(lip, commit_lsn);

                /* item_lsn of -1 means the item needs no further processing */
                if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0)
                        continue;

                /*
                 * if we are aborting the operation, no point in inserting the
                 * object into the AIL as we are in a shutdown situation.
                 */
                if (aborted) {
                        ASSERT(XFS_FORCED_SHUTDOWN(ailp->ail_mount));
                        lip->li_ops->iop_unpin(lip, 1);
                        continue;
                }

                if (item_lsn != commit_lsn) {
                        /*
                         * Not a bulk update option due to unusual item_lsn.
                         * Push into AIL immediately, rechecking the lsn once
                         * we have the ail lock. Then unpin the item. This does
                         * not affect the AIL cursor the bulk insert path is
                         * using.
                         */
                        spin_lock(&ailp->ail_lock);
                        if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0)
                                xfs_trans_ail_update(ailp, lip, item_lsn);
                        else
                                spin_unlock(&ailp->ail_lock);
                        lip->li_ops->iop_unpin(lip, 0);
                        continue;
                }

                /* Item is a candidate for bulk AIL insert. */
                log_items[i++] = lv->lv_item;
                if (i >= LOG_ITEM_BATCH_SIZE) {
                        xfs_log_item_batch_insert(ailp, &cur, log_items,
                                        LOG_ITEM_BATCH_SIZE, commit_lsn);
                        i = 0;
                }
        }

        /* make sure we insert the remainder! */
        if (i)
                xfs_log_item_batch_insert(ailp, &cur, log_items, i, commit_lsn);

        spin_lock(&ailp->ail_lock);
        xfs_trans_ail_cursor_done(&cur);
        spin_unlock(&ailp->ail_lock);
}

/*
 * Commit the given transaction to the log.
 *
 * The XFS disk error handling mechanism is not based on a typical
 * transaction abort mechanism.  Logically, after the filesystem
 * gets marked 'SHUTDOWN', we can't let any new transactions
 * be durable - ie. committed to disk - because some metadata might
 * be inconsistent.  In such cases, this returns an error, and the
 * caller may assume that all locked objects joined to the transaction
 * have already been unlocked as if the commit had succeeded.
 * Do not reference the transaction structure after this call.
 */
static int
__xfs_trans_commit(
        struct xfs_trans        *tp,
        bool                    regrant)
{
        struct xfs_mount        *mp = tp->t_mountp;
        xfs_lsn_t               commit_lsn = -1;
        int                     error = 0;
        int                     sync = tp->t_flags & XFS_TRANS_SYNC;

        ASSERT(!tp->t_agfl_dfops ||
               !xfs_defer_has_unfinished_work(tp->t_agfl_dfops) || regrant);

        trace_xfs_trans_commit(tp, _RET_IP_);

        /*
         * If there is nothing to be logged by the transaction,
         * then unlock all of the items associated with the
         * transaction and free the transaction structure.
         * Also make sure to return any reserved blocks to
         * the free pool.
         */
        if (!(tp->t_flags & XFS_TRANS_DIRTY))
                goto out_unreserve;

        if (XFS_FORCED_SHUTDOWN(mp)) {
                error = -EIO;
                goto out_unreserve;
        }

        ASSERT(tp->t_ticket != NULL);

        /*
         * If we need to update the superblock, then do it now.
         */
        if (tp->t_flags & XFS_TRANS_SB_DIRTY)
                xfs_trans_apply_sb_deltas(tp);
        xfs_trans_apply_dquot_deltas(tp);

        xfs_log_commit_cil(mp, tp, &commit_lsn, regrant);

        current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
        xfs_trans_free(tp);

        /*
         * If the transaction needs to be synchronous, then force the
         * log out now and wait for it.
         */
        if (sync) {
                error = xfs_log_force_lsn(mp, commit_lsn, XFS_LOG_SYNC, NULL);
                XFS_STATS_INC(mp, xs_trans_sync);
        } else {
                XFS_STATS_INC(mp, xs_trans_async);
        }

        return error;

out_unreserve:
        xfs_trans_unreserve_and_mod_sb(tp);

        /*
         * It is indeed possible for the transaction to be not dirty but
         * the dqinfo portion to be.  All that means is that we have some
         * (non-persistent) quota reservations that need to be unreserved.
         */
        xfs_trans_unreserve_and_mod_dquots(tp);
        if (tp->t_ticket) {
                commit_lsn = xfs_log_done(mp, tp->t_ticket, NULL, regrant);
                if (commit_lsn == -1 && !error)
                        error = -EIO;
                tp->t_ticket = NULL;
        }
        current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
        xfs_trans_free_items(tp, NULLCOMMITLSN, !!error);
        xfs_trans_free(tp);

        XFS_STATS_INC(mp, xs_trans_empty);
        return error;
}

int
xfs_trans_commit(
        struct xfs_trans        *tp)
{
        return __xfs_trans_commit(tp, false);
}

/*
 * Unlock all of the transaction's items and free the transaction.
 * The transaction must not have modified any of its items, because
 * there is no way to restore them to their previous state.
 *
 * If the transaction has made a log reservation, make sure to release
 * it as well.
 */
void
xfs_trans_cancel(
        struct xfs_trans        *tp)
{
        struct xfs_mount        *mp = tp->t_mountp;
        bool                    dirty = (tp->t_flags & XFS_TRANS_DIRTY);

        trace_xfs_trans_cancel(tp, _RET_IP_);

        /*
         * See if the caller is relying on us to shut down the
         * filesystem.  This happens in paths where we detect
         * corruption and decide to give up.
         */
        if (dirty && !XFS_FORCED_SHUTDOWN(mp)) {
                XFS_ERROR_REPORT("xfs_trans_cancel", XFS_ERRLEVEL_LOW, mp);
                xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
        }
#ifdef DEBUG
        if (!dirty && !XFS_FORCED_SHUTDOWN(mp)) {
                struct xfs_log_item *lip;

                list_for_each_entry(lip, &tp->t_items, li_trans)
                        ASSERT(!(lip->li_type == XFS_LI_EFD));
        }
#endif
        xfs_trans_unreserve_and_mod_sb(tp);
        xfs_trans_unreserve_and_mod_dquots(tp);

        if (tp->t_ticket) {
                xfs_log_done(mp, tp->t_ticket, NULL, false);
                tp->t_ticket = NULL;
        }

        /* mark this thread as no longer being in a transaction */
        current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);

        xfs_trans_free_items(tp, NULLCOMMITLSN, dirty);
        xfs_trans_free(tp);
}

/*
 * Roll from one trans in the sequence of PERMANENT transactions to
 * the next: permanent transactions are only flushed out when
 * committed with xfs_trans_commit(), but we still want to let
 * chunks of it go to the log as soon as possible.  So we commit the
 * chunk we've been working on and get a new transaction to continue.
 */
int
xfs_trans_roll(
        struct xfs_trans        **tpp)
{
        struct xfs_trans        *trans = *tpp;
        struct xfs_trans_res    tres;
        int                     error;

        trace_xfs_trans_roll(trans, _RET_IP_);

        /*
         * Copy the critical parameters from one trans to the next.
         */
        tres.tr_logres = trans->t_log_res;
        tres.tr_logcount = trans->t_log_count;

        *tpp = xfs_trans_dup(trans);

        /*
         * Commit the current transaction.
         * If this commit failed, then it'd just unlock those items that
         * are not marked ihold. That also means that a filesystem shutdown
         * is in progress. The caller takes the responsibility to cancel
         * the duplicate transaction that gets returned.
         */
        error = __xfs_trans_commit(trans, true);
        if (error)
                return error;

        /*
         * Reserve space in the log for the next transaction.
         * This also pushes items in the "AIL", the list of logged items,
         * out to disk if they are taking up space at the tail of the log
         * that we want to use.  This requires that either nothing be locked
         * across this call, or that anything that is locked be logged in
         * the prior and the next transactions.
         */
        tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
        return xfs_trans_reserve(*tpp, &tres, 0, 0);
}
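
/*
 * Editorial example (not part of the original file): a sketch of rolling a
 * permanent transaction between the steps of a long-running operation,
 * re-joining the inode after each roll.  This mirrors the common
 * xfs_trans_roll_inode() pattern; the helper name and loop structure are
 * assumptions for demonstration only.
 */
static int __maybe_unused
xfs_example_roll_steps(
        struct xfs_trans        **tpp,
        struct xfs_inode        *ip,
        int                     steps)
{
        int                     error;

        while (steps-- > 0) {
                /* ... dirty items joined to *tpp for this step ... */
                xfs_trans_log_inode(*tpp, ip, XFS_ILOG_CORE);

                /* commit this chunk and get a fresh reservation */
                error = xfs_trans_roll(tpp);
                if (error)
                        return error;

                /* the new transaction starts with no joined items */
                xfs_trans_ijoin(*tpp, ip, 0);
        }
        return 0;
}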