// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * Copyright (C) 2010 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_extent_busy.h"
#include "xfs_quota.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_trace.h"
#include "xfs_error.h"
#include "xfs_defer.h"

kmem_zone_t	*xfs_trans_zone;
#if defined(CONFIG_TRACEPOINTS)
static void
xfs_trans_trace_reservations(
	struct xfs_mount	*mp)
{
	struct xfs_trans_res	resv;
	struct xfs_trans_res	*res;
	struct xfs_trans_res	*end_res;
	int			i;

	res = (struct xfs_trans_res *)M_RES(mp);
	end_res = (struct xfs_trans_res *)(M_RES(mp) + 1);
	for (i = 0; res < end_res; i++, res++)
		trace_xfs_trans_resv_calc(mp, i, res);
	xfs_log_get_max_trans_res(mp, &resv);
	trace_xfs_trans_resv_calc(mp, -1, &resv);
}
#else
# define xfs_trans_trace_reservations(mp)
#endif
/*
 * Initialize the precomputed transaction reservation values
 * in the mount structure.
 */
void
xfs_trans_init(
	struct xfs_mount	*mp)
{
	xfs_trans_resv_calc(mp, M_RES(mp));
	xfs_trans_trace_reservations(mp);
}
/*
 * Free the transaction structure.  If there is more clean up
 * to do when the structure is freed, add it here.
 */
STATIC void
xfs_trans_free(
	struct xfs_trans	*tp)
{
	xfs_extent_busy_sort(&tp->t_busy);
	xfs_extent_busy_clear(tp->t_mountp, &tp->t_busy, false);

	trace_xfs_trans_free(tp, _RET_IP_);
	atomic_dec(&tp->t_mountp->m_active_trans);
	if (!(tp->t_flags & XFS_TRANS_NO_WRITECOUNT))
		sb_end_intwrite(tp->t_mountp->m_super);
	xfs_trans_free_dqinfo(tp);
	kmem_zone_free(xfs_trans_zone, tp);
}
/*
 * This is called to create a new transaction which will share the
 * permanent log reservation of the given transaction.  The remaining
 * unused block and rt extent reservations are also inherited.  This
 * implies that the original transaction is no longer allowed to allocate
 * blocks.  Locks and log items, however, are not inherited.  They must
 * be added to the new transaction explicitly.
 */
STATIC struct xfs_trans *
xfs_trans_dup(
	struct xfs_trans	*tp)
{
	struct xfs_trans	*ntp;

	trace_xfs_trans_dup(tp, _RET_IP_);

	ntp = kmem_zone_zalloc(xfs_trans_zone, KM_SLEEP);

	/*
	 * Initialize the new transaction structure.
	 */
	ntp->t_magic = XFS_TRANS_HEADER_MAGIC;
	ntp->t_mountp = tp->t_mountp;
	INIT_LIST_HEAD(&ntp->t_items);
	INIT_LIST_HEAD(&ntp->t_busy);

	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(tp->t_ticket != NULL);

	ntp->t_flags = XFS_TRANS_PERM_LOG_RES |
		       (tp->t_flags & XFS_TRANS_RESERVE) |
		       (tp->t_flags & XFS_TRANS_NO_WRITECOUNT);
	/* We gave our writer reference to the new transaction */
	tp->t_flags |= XFS_TRANS_NO_WRITECOUNT;
	ntp->t_ticket = xfs_log_ticket_get(tp->t_ticket);

	ASSERT(tp->t_blk_res >= tp->t_blk_res_used);
	ntp->t_blk_res = tp->t_blk_res - tp->t_blk_res_used;
	tp->t_blk_res = tp->t_blk_res_used;

	ntp->t_rtx_res = tp->t_rtx_res - tp->t_rtx_res_used;
	tp->t_rtx_res = tp->t_rtx_res_used;
	ntp->t_pflags = tp->t_pflags;
	ntp->t_agfl_dfops = tp->t_agfl_dfops;

	xfs_trans_dup_dqinfo(tp, ntp);

	atomic_inc(&tp->t_mountp->m_active_trans);
	return ntp;
}
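/*
 * Worked example (editor's illustration, not part of the original code):
 * suppose tp reserved 100 blocks and has used 30 of them when it is
 * duplicated.  Then:
 *
 *	ntp->t_blk_res = 100 - 30 = 70	(unused reservation moves to ntp)
 *	tp->t_blk_res  = 30		(tp keeps only what it already used)
 *
 * With t_blk_res == t_blk_res_used, tp can no longer allocate blocks,
 * which is exactly the restriction described in the comment above.
 */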
/*
 * This is called to reserve free disk blocks and log space for the
 * given transaction.  This must be done before allocating any resources
 * within the transaction.
 *
 * This will return ENOSPC if there are not enough blocks available.
 * It will sleep waiting for available log space.
 * The only valid value for the flags parameter is XFS_RES_LOG_PERM, which
 * is used by long running transactions.  If any one of the reservations
 * fails then they will all be backed out.
 *
 * This does not do quota reservations. That typically is done by the
 * caller afterwards.
 */
static int
xfs_trans_reserve(
	struct xfs_trans	*tp,
	struct xfs_trans_res	*resp,
	uint			blocks,
	uint			rtextents)
{
	int		error = 0;
	bool		rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;

	/* Mark this thread as being in a transaction */
	current_set_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);

	/*
	 * Attempt to reserve the needed disk blocks by decrementing
	 * the number needed from the number available.  This will
	 * fail if the count would go below zero.
	 */
	if (blocks > 0) {
		error = xfs_mod_fdblocks(tp->t_mountp, -((int64_t)blocks), rsvd);
		if (error != 0) {
			current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
			return -ENOSPC;
		}
		tp->t_blk_res += blocks;
	}

	/*
	 * Reserve the log space needed for this transaction.
	 */
	if (resp->tr_logres > 0) {
		bool	permanent = false;

		ASSERT(tp->t_log_res == 0 ||
		       tp->t_log_res == resp->tr_logres);
		ASSERT(tp->t_log_count == 0 ||
		       tp->t_log_count == resp->tr_logcount);

		if (resp->tr_logflags & XFS_TRANS_PERM_LOG_RES) {
			tp->t_flags |= XFS_TRANS_PERM_LOG_RES;
			permanent = true;
		} else {
			ASSERT(tp->t_ticket == NULL);
			ASSERT(!(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
		}

		if (tp->t_ticket != NULL) {
			ASSERT(resp->tr_logflags & XFS_TRANS_PERM_LOG_RES);
			error = xfs_log_regrant(tp->t_mountp, tp->t_ticket);
		} else {
			error = xfs_log_reserve(tp->t_mountp,
						resp->tr_logres,
						resp->tr_logcount,
						&tp->t_ticket, XFS_TRANSACTION,
						permanent);
		}

		if (error)
			goto undo_blocks;

		tp->t_log_res = resp->tr_logres;
		tp->t_log_count = resp->tr_logcount;
	}

	/*
	 * Attempt to reserve the needed realtime extents by decrementing
	 * the number needed from the number available.  This will
	 * fail if the count would go below zero.
	 */
	if (rtextents > 0) {
		error = xfs_mod_frextents(tp->t_mountp, -((int64_t)rtextents));
		if (error) {
			error = -ENOSPC;
			goto undo_log;
		}
		tp->t_rtx_res += rtextents;
	}

	return 0;

	/*
	 * Error cases jump to one of these labels to undo any
	 * reservations which have already been performed.
	 */
undo_log:
	if (resp->tr_logres > 0) {
		xfs_log_done(tp->t_mountp, tp->t_ticket, NULL, false);
		tp->t_ticket = NULL;
		tp->t_log_res = 0;
		tp->t_flags &= ~XFS_TRANS_PERM_LOG_RES;
	}

undo_blocks:
	if (blocks > 0) {
		xfs_mod_fdblocks(tp->t_mountp, (int64_t)blocks, rsvd);
		tp->t_blk_res = 0;
	}

	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);

	return error;
}
int
xfs_trans_alloc(
	struct xfs_mount	*mp,
	struct xfs_trans_res	*resp,
	uint			blocks,
	uint			rtextents,
	uint			flags,
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*tp;
	int			error;

	if (!(flags & XFS_TRANS_NO_WRITECOUNT))
		sb_start_intwrite(mp->m_super);

	WARN_ON(mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
	atomic_inc(&mp->m_active_trans);

	tp = kmem_zone_zalloc(xfs_trans_zone,
		(flags & XFS_TRANS_NOFS) ? KM_NOFS : KM_SLEEP);
	tp->t_magic = XFS_TRANS_HEADER_MAGIC;
	tp->t_flags = flags;
	tp->t_mountp = mp;
	INIT_LIST_HEAD(&tp->t_items);
	INIT_LIST_HEAD(&tp->t_busy);

	error = xfs_trans_reserve(tp, resp, blocks, rtextents);
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	trace_xfs_trans_alloc(tp, _RET_IP_);

	*tpp = tp;
	return 0;
}
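/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the typical caller-side life cycle around xfs_trans_alloc().  The
 * tr_ichange reservation and the inode helpers are assumptions drawn from
 * elsewhere in XFS, shown only to make the contract concrete: reserve
 * first, then lock and join objects, then commit (or cancel).
 */
#if 0	/* example only, never compiled */
static int
xfs_example_touch_inode(
	struct xfs_mount	*mp,
	struct xfs_inode	*ip)
{
	struct xfs_trans	*tp;
	int			error;

	/* reserve log space before taking any locks */
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	/* dirty the inode core so the commit has something to log */
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	/* commit (or cancel) unlocks joined items and frees tp */
	return xfs_trans_commit(tp);
}
#endif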
/*
 * Create an empty transaction with no reservation.  This is a defensive
 * mechanism for routines that query metadata without actually modifying
 * them -- if the metadata being queried is somehow cross-linked (think a
 * btree block pointer that points higher in the tree), we risk deadlock.
 * However, blocks grabbed as part of a transaction can be re-grabbed.
 * The verifiers will notice the corrupt block and the operation will fail
 * back to userspace without deadlocking.
 *
 * Note the zero-length reservation; this transaction MUST be cancelled
 * without any dirty data.
 */
int
xfs_trans_alloc_empty(
	struct xfs_mount	*mp,
	struct xfs_trans	**tpp)
{
	struct xfs_trans_res	resv = {0};

	return xfs_trans_alloc(mp, &resv, 0, 0, XFS_TRANS_NO_WRITECOUNT, tpp);
}
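/*
 * Illustrative sketch (editor's addition): how a read-only query path
 * would use the empty transaction above.  The lookup in the middle is a
 * placeholder; the point is that an empty transaction carries no
 * reservation and therefore MUST end in xfs_trans_cancel(), never in a
 * commit with dirty items.
 */
#if 0	/* example only */
static int
xfs_example_query_metadata(
	struct xfs_mount	*mp)
{
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc_empty(mp, &tp);
	if (error)
		return error;

	/* ... read-only metadata lookups holding buffers via tp ... */

	xfs_trans_cancel(tp);		/* nothing may be dirty here */
	return 0;
}
#endif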
/*
 * Record the indicated change to the given field for application
 * to the file system's superblock when the transaction commits.
 * For now, just store the change in the transaction structure.
 *
 * Mark the transaction structure to indicate that the superblock
 * needs to be updated before committing.
 *
 * Because we may not be keeping track of allocated/free inodes and
 * used filesystem blocks in the superblock, we do not mark the
 * superblock dirty in this transaction if we modify these fields.
 * We still need to update the transaction deltas so that they get
 * applied to the incore superblock, but we don't want them to
 * cause the superblock to get locked and logged if these are the
 * only fields in the superblock that the transaction modifies.
 */
void
xfs_trans_mod_sb(
	xfs_trans_t	*tp,
	uint		field,
	int64_t		delta)
{
	uint32_t	flags = (XFS_TRANS_DIRTY|XFS_TRANS_SB_DIRTY);
	xfs_mount_t	*mp = tp->t_mountp;

	switch (field) {
	case XFS_TRANS_SB_ICOUNT:
		tp->t_icount_delta += delta;
		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_IFREE:
		tp->t_ifree_delta += delta;
		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_FDBLOCKS:
		/*
		 * Track the number of blocks allocated in the transaction.
		 * Make sure it does not exceed the number reserved. If so,
		 * shutdown as this can lead to accounting inconsistency.
		 */
		if (delta < 0) {
			tp->t_blk_res_used += (uint)-delta;
			if (tp->t_blk_res_used > tp->t_blk_res)
				xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		}
		tp->t_fdblocks_delta += delta;
		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_RES_FDBLOCKS:
		/*
		 * The allocation has already been applied to the
		 * in-core superblock's counter.  This should only
		 * be applied to the on-disk superblock.
		 */
		tp->t_res_fdblocks_delta += delta;
		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_FREXTENTS:
		/*
		 * Track the number of blocks allocated in the
		 * transaction.  Make sure it does not exceed the
		 * number reserved.
		 */
		if (delta < 0) {
			tp->t_rtx_res_used += (uint)-delta;
			ASSERT(tp->t_rtx_res_used <= tp->t_rtx_res);
		}
		tp->t_frextents_delta += delta;
		break;
	case XFS_TRANS_SB_RES_FREXTENTS:
		/*
		 * The allocation has already been applied to the
		 * in-core superblock's counter.  This should only
		 * be applied to the on-disk superblock.
		 */
		ASSERT(delta < 0);
		tp->t_res_frextents_delta += delta;
		break;
	case XFS_TRANS_SB_DBLOCKS:
		ASSERT(delta > 0);
		tp->t_dblocks_delta += delta;
		break;
	case XFS_TRANS_SB_AGCOUNT:
		ASSERT(delta > 0);
		tp->t_agcount_delta += delta;
		break;
	case XFS_TRANS_SB_IMAXPCT:
		tp->t_imaxpct_delta += delta;
		break;
	case XFS_TRANS_SB_REXTSIZE:
		tp->t_rextsize_delta += delta;
		break;
	case XFS_TRANS_SB_RBMBLOCKS:
		tp->t_rbmblocks_delta += delta;
		break;
	case XFS_TRANS_SB_RBLOCKS:
		tp->t_rblocks_delta += delta;
		break;
	case XFS_TRANS_SB_REXTENTS:
		tp->t_rextents_delta += delta;
		break;
	case XFS_TRANS_SB_REXTSLOG:
		tp->t_rextslog_delta += delta;
		break;
	default:
		ASSERT(0);
		return;
	}

	tp->t_flags |= flags;
}
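/*
 * Illustrative sketch (editor's addition): how callers feed deltas to
 * xfs_trans_mod_sb().  "len" is a stand-in for a real allocation size.
 * Allocating blocks out of the free pool is reported as a negative
 * free-block delta, which the FDBLOCKS case above charges against the
 * transaction's block reservation.
 */
#if 0	/* example only */
	xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, -(int64_t)len);
	/* freeing them back again would be the positive delta: */
	xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, (int64_t)len);
#endif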
/*
 * xfs_trans_apply_sb_deltas() is called from the commit code
 * to bring the superblock buffer into the current transaction
 * and modify it as requested by earlier calls to xfs_trans_mod_sb().
 *
 * For now we just look at each field allowed to change and change
 * it if necessary.
 */
STATIC void
xfs_trans_apply_sb_deltas(
	xfs_trans_t	*tp)
{
	xfs_dsb_t	*sbp;
	xfs_buf_t	*bp;
	int		whole = 0;

	bp = xfs_trans_getsb(tp, tp->t_mountp, 0);
	sbp = XFS_BUF_TO_SBP(bp);

	/*
	 * Check that superblock mods match the mods made to AGF counters.
	 */
	ASSERT((tp->t_fdblocks_delta + tp->t_res_fdblocks_delta) ==
	       (tp->t_ag_freeblks_delta + tp->t_ag_flist_delta +
		tp->t_ag_btree_delta));

	/*
	 * Only update the superblock counters if we are logging them
	 */
	if (!xfs_sb_version_haslazysbcount(&(tp->t_mountp->m_sb))) {
		if (tp->t_icount_delta)
			be64_add_cpu(&sbp->sb_icount, tp->t_icount_delta);
		if (tp->t_ifree_delta)
			be64_add_cpu(&sbp->sb_ifree, tp->t_ifree_delta);
		if (tp->t_fdblocks_delta)
			be64_add_cpu(&sbp->sb_fdblocks, tp->t_fdblocks_delta);
		if (tp->t_res_fdblocks_delta)
			be64_add_cpu(&sbp->sb_fdblocks, tp->t_res_fdblocks_delta);
	}

	if (tp->t_frextents_delta)
		be64_add_cpu(&sbp->sb_frextents, tp->t_frextents_delta);
	if (tp->t_res_frextents_delta)
		be64_add_cpu(&sbp->sb_frextents, tp->t_res_frextents_delta);

	if (tp->t_dblocks_delta) {
		be64_add_cpu(&sbp->sb_dblocks, tp->t_dblocks_delta);
		whole = 1;
	}
	if (tp->t_agcount_delta) {
		be32_add_cpu(&sbp->sb_agcount, tp->t_agcount_delta);
		whole = 1;
	}
	if (tp->t_imaxpct_delta) {
		sbp->sb_imax_pct += tp->t_imaxpct_delta;
		whole = 1;
	}
	if (tp->t_rextsize_delta) {
		be32_add_cpu(&sbp->sb_rextsize, tp->t_rextsize_delta);
		whole = 1;
	}
	if (tp->t_rbmblocks_delta) {
		be32_add_cpu(&sbp->sb_rbmblocks, tp->t_rbmblocks_delta);
		whole = 1;
	}
	if (tp->t_rblocks_delta) {
		be64_add_cpu(&sbp->sb_rblocks, tp->t_rblocks_delta);
		whole = 1;
	}
	if (tp->t_rextents_delta) {
		be64_add_cpu(&sbp->sb_rextents, tp->t_rextents_delta);
		whole = 1;
	}
	if (tp->t_rextslog_delta) {
		sbp->sb_rextslog += tp->t_rextslog_delta;
		whole = 1;
	}

	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
	if (whole)
		/*
		 * Log the whole thing, the fields are noncontiguous.
		 */
		xfs_trans_log_buf(tp, bp, 0, sizeof(xfs_dsb_t) - 1);
	else
		/*
		 * Since all the modifiable fields are contiguous, we
		 * can get away with this.
		 */
		xfs_trans_log_buf(tp, bp, offsetof(xfs_dsb_t, sb_icount),
				  offsetof(xfs_dsb_t, sb_frextents) +
				  sizeof(sbp->sb_frextents) - 1);
}
STATIC int
xfs_sb_mod8(
	uint8_t			*field,
	int8_t			delta)
{
	int8_t			counter = *field;

	counter += delta;
	if (counter < 0) {
		ASSERT(0);
		return -EINVAL;
	}
	*field = counter;
	return 0;
}

STATIC int
xfs_sb_mod32(
	uint32_t		*field,
	int32_t			delta)
{
	int32_t			counter = *field;

	counter += delta;
	if (counter < 0) {
		ASSERT(0);
		return -EINVAL;
	}
	*field = counter;
	return 0;
}

STATIC int
xfs_sb_mod64(
	uint64_t		*field,
	int64_t			delta)
{
	int64_t			counter = *field;

	counter += delta;
	if (counter < 0) {
		ASSERT(0);
		return -EINVAL;
	}
	*field = counter;
	return 0;
}
/*
 * xfs_trans_unreserve_and_mod_sb() is called to release unused reservations
 * and apply superblock counter changes to the in-core superblock.  The
 * t_res_fdblocks_delta and t_res_frextents_delta fields are explicitly NOT
 * applied to the in-core superblock; the idea is that this has already been
 * done.
 *
 * If we are not logging superblock counters, then the inode allocated/free and
 * used block counts are not updated in the on disk superblock. In this case,
 * XFS_TRANS_SB_DIRTY will not be set when the transaction is updated but we
 * still need to update the incore superblock with the changes.
 */
void
xfs_trans_unreserve_and_mod_sb(
	struct xfs_trans	*tp)
{
	struct xfs_mount	*mp = tp->t_mountp;
	bool			rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
	int64_t			blkdelta = 0;
	int64_t			rtxdelta = 0;
	int64_t			idelta = 0;
	int64_t			ifreedelta = 0;
	int			error;

	/* calculate deltas */
	if (tp->t_blk_res > 0)
		blkdelta = tp->t_blk_res;
	if ((tp->t_fdblocks_delta != 0) &&
	    (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
	     (tp->t_flags & XFS_TRANS_SB_DIRTY)))
		blkdelta += tp->t_fdblocks_delta;

	if (tp->t_rtx_res > 0)
		rtxdelta = tp->t_rtx_res;
	if ((tp->t_frextents_delta != 0) &&
	    (tp->t_flags & XFS_TRANS_SB_DIRTY))
		rtxdelta += tp->t_frextents_delta;

	if (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
	     (tp->t_flags & XFS_TRANS_SB_DIRTY)) {
		idelta = tp->t_icount_delta;
		ifreedelta = tp->t_ifree_delta;
	}

	/* apply the per-cpu counters */
	if (blkdelta) {
		error = xfs_mod_fdblocks(mp, blkdelta, rsvd);
		if (error)
			goto out;
	}

	if (idelta) {
		error = xfs_mod_icount(mp, idelta);
		if (error)
			goto out_undo_fdblocks;
	}

	if (ifreedelta) {
		error = xfs_mod_ifree(mp, ifreedelta);
		if (error)
			goto out_undo_icount;
	}

	if (rtxdelta == 0 && !(tp->t_flags & XFS_TRANS_SB_DIRTY))
		return;

	/* apply remaining deltas */
	spin_lock(&mp->m_sb_lock);
	if (rtxdelta) {
		error = xfs_sb_mod64(&mp->m_sb.sb_frextents, rtxdelta);
		if (error)
			goto out_undo_ifree;
	}

	if (tp->t_dblocks_delta != 0) {
		error = xfs_sb_mod64(&mp->m_sb.sb_dblocks, tp->t_dblocks_delta);
		if (error)
			goto out_undo_frextents;
	}
	if (tp->t_agcount_delta != 0) {
		error = xfs_sb_mod32(&mp->m_sb.sb_agcount, tp->t_agcount_delta);
		if (error)
			goto out_undo_dblocks;
	}
	if (tp->t_imaxpct_delta != 0) {
		error = xfs_sb_mod8(&mp->m_sb.sb_imax_pct, tp->t_imaxpct_delta);
		if (error)
			goto out_undo_agcount;
	}
	if (tp->t_rextsize_delta != 0) {
		error = xfs_sb_mod32(&mp->m_sb.sb_rextsize,
				     tp->t_rextsize_delta);
		if (error)
			goto out_undo_imaxpct;
	}
	if (tp->t_rbmblocks_delta != 0) {
		error = xfs_sb_mod32(&mp->m_sb.sb_rbmblocks,
				     tp->t_rbmblocks_delta);
		if (error)
			goto out_undo_rextsize;
	}
	if (tp->t_rblocks_delta != 0) {
		error = xfs_sb_mod64(&mp->m_sb.sb_rblocks, tp->t_rblocks_delta);
		if (error)
			goto out_undo_rbmblocks;
	}
	if (tp->t_rextents_delta != 0) {
		error = xfs_sb_mod64(&mp->m_sb.sb_rextents,
				     tp->t_rextents_delta);
		if (error)
			goto out_undo_rblocks;
	}
	if (tp->t_rextslog_delta != 0) {
		error = xfs_sb_mod8(&mp->m_sb.sb_rextslog,
				     tp->t_rextslog_delta);
		if (error)
			goto out_undo_rextents;
	}
	spin_unlock(&mp->m_sb_lock);
	return;

out_undo_rextents:
	if (tp->t_rextents_delta)
		xfs_sb_mod64(&mp->m_sb.sb_rextents, -tp->t_rextents_delta);
out_undo_rblocks:
	if (tp->t_rblocks_delta)
		xfs_sb_mod64(&mp->m_sb.sb_rblocks, -tp->t_rblocks_delta);
out_undo_rbmblocks:
	if (tp->t_rbmblocks_delta)
		xfs_sb_mod32(&mp->m_sb.sb_rbmblocks, -tp->t_rbmblocks_delta);
out_undo_rextsize:
	if (tp->t_rextsize_delta)
		xfs_sb_mod32(&mp->m_sb.sb_rextsize, -tp->t_rextsize_delta);
out_undo_imaxpct:
	/* note: the original guard tested t_rextsize_delta here; that was a
	 * copy-and-paste slip, the undo must mirror the imaxpct apply above */
	if (tp->t_imaxpct_delta)
		xfs_sb_mod8(&mp->m_sb.sb_imax_pct, -tp->t_imaxpct_delta);
out_undo_agcount:
	if (tp->t_agcount_delta)
		xfs_sb_mod32(&mp->m_sb.sb_agcount, -tp->t_agcount_delta);
out_undo_dblocks:
	if (tp->t_dblocks_delta)
		xfs_sb_mod64(&mp->m_sb.sb_dblocks, -tp->t_dblocks_delta);
out_undo_frextents:
	if (rtxdelta)
		xfs_sb_mod64(&mp->m_sb.sb_frextents, -rtxdelta);
out_undo_ifree:
	spin_unlock(&mp->m_sb_lock);
	if (ifreedelta)
		xfs_mod_ifree(mp, -ifreedelta);
out_undo_icount:
	if (idelta)
		xfs_mod_icount(mp, -idelta);
out_undo_fdblocks:
	if (blkdelta)
		xfs_mod_fdblocks(mp, -blkdelta, rsvd);
out:
	ASSERT(error == 0);
	return;
}
/* Add the given log item to the transaction's list of log items. */
void
xfs_trans_add_item(
	struct xfs_trans	*tp,
	struct xfs_log_item	*lip)
{
	ASSERT(lip->li_mountp == tp->t_mountp);
	ASSERT(lip->li_ailp == tp->t_mountp->m_ail);
	ASSERT(list_empty(&lip->li_trans));
	ASSERT(!test_bit(XFS_LI_DIRTY, &lip->li_flags));

	list_add_tail(&lip->li_trans, &tp->t_items);
	trace_xfs_trans_add_item(tp, _RET_IP_);
}
/*
 * Unlink the log item from the transaction.  The log item is no longer
 * considered dirty in this transaction, as the linked transaction has
 * finished, either by abort or commit completion.
 */
void
xfs_trans_del_item(
	struct xfs_log_item	*lip)
{
	clear_bit(XFS_LI_DIRTY, &lip->li_flags);
	list_del_init(&lip->li_trans);
}
/* Detach and unlock all of the items in a transaction */
void
xfs_trans_free_items(
	struct xfs_trans	*tp,
	xfs_lsn_t		commit_lsn,
	bool			abort)
{
	struct xfs_log_item	*lip, *next;

	trace_xfs_trans_free_items(tp, _RET_IP_);

	list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) {
		xfs_trans_del_item(lip);
		if (commit_lsn != NULLCOMMITLSN)
			lip->li_ops->iop_committing(lip, commit_lsn);
		if (abort)
			set_bit(XFS_LI_ABORTED, &lip->li_flags);
		lip->li_ops->iop_unlock(lip);
	}
}
static inline void
xfs_log_item_batch_insert(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	struct xfs_log_item	**log_items,
	int			nr_items,
	xfs_lsn_t		commit_lsn)
{
	int	i;

	spin_lock(&ailp->ail_lock);
	/* xfs_trans_ail_update_bulk drops ailp->ail_lock */
	xfs_trans_ail_update_bulk(ailp, cur, log_items, nr_items, commit_lsn);

	for (i = 0; i < nr_items; i++) {
		struct xfs_log_item *lip = log_items[i];

		lip->li_ops->iop_unpin(lip, 0);
	}
}
/*
 * Bulk operation version of xfs_trans_committed that takes a log vector of
 * items to insert into the AIL. This uses bulk AIL insertion techniques to
 * minimise lock traffic.
 *
 * If we are called with the aborted flag set, it is because a log write during
 * a CIL checkpoint commit has failed. In this case, all the items in the
 * checkpoint have already gone through iop_committed and iop_unlock, which
 * means that checkpoint commit abort handling is treated exactly the same
 * as an iclog write error even though we haven't started any IO yet. Hence in
 * this case all we need to do is iop_committed processing, followed by an
 * iop_unpin(aborted) call.
 *
 * The AIL cursor is used to optimise the insert process. If commit_lsn is not
 * at the end of the AIL, the insert cursor avoids the need to walk
 * the AIL to find the insertion point on every xfs_log_item_batch_insert()
 * call. This saves a lot of needless list walking and is a net win, even
 * though it slightly increases the amount of AIL lock traffic to set it up
 * and tear it down.
 */
void
xfs_trans_committed_bulk(
	struct xfs_ail		*ailp,
	struct xfs_log_vec	*log_vector,
	xfs_lsn_t		commit_lsn,
	int			aborted)
{
#define LOG_ITEM_BATCH_SIZE	32
	struct xfs_log_item	*log_items[LOG_ITEM_BATCH_SIZE];
	struct xfs_log_vec	*lv;
	struct xfs_ail_cursor	cur;
	int			i = 0;

	spin_lock(&ailp->ail_lock);
	xfs_trans_ail_cursor_last(ailp, &cur, commit_lsn);
	spin_unlock(&ailp->ail_lock);

	/* unpin all the log items */
	for (lv = log_vector; lv; lv = lv->lv_next) {
		struct xfs_log_item	*lip = lv->lv_item;
		xfs_lsn_t		item_lsn;

		if (aborted)
			set_bit(XFS_LI_ABORTED, &lip->li_flags);
		item_lsn = lip->li_ops->iop_committed(lip, commit_lsn);

		/* item_lsn of -1 means the item needs no further processing */
		if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0)
			continue;

		/*
		 * if we are aborting the operation, no point in inserting the
		 * object into the AIL as we are in a shutdown situation.
		 */
		if (aborted) {
			ASSERT(XFS_FORCED_SHUTDOWN(ailp->ail_mount));
			lip->li_ops->iop_unpin(lip, 1);
			continue;
		}

		if (item_lsn != commit_lsn) {

			/*
			 * Not a bulk update option due to unusual item_lsn.
			 * Push into AIL immediately, rechecking the lsn once
			 * we have the ail lock. Then unpin the item. This does
			 * not affect the AIL cursor the bulk insert path is
			 * using.
			 */
			spin_lock(&ailp->ail_lock);
			if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0)
				xfs_trans_ail_update(ailp, lip, item_lsn);
			else
				spin_unlock(&ailp->ail_lock);
			lip->li_ops->iop_unpin(lip, 0);
			continue;
		}

		/* Item is a candidate for bulk AIL insert.  */
		log_items[i++] = lv->lv_item;
		if (i >= LOG_ITEM_BATCH_SIZE) {
			xfs_log_item_batch_insert(ailp, &cur, log_items,
					LOG_ITEM_BATCH_SIZE, commit_lsn);
			i = 0;
		}
	}

	/* make sure we insert the remainder! */
	if (i)
		xfs_log_item_batch_insert(ailp, &cur, log_items, i, commit_lsn);

	spin_lock(&ailp->ail_lock);
	xfs_trans_ail_cursor_done(&cur);
	spin_unlock(&ailp->ail_lock);
}
/*
 * Commit the given transaction to the log.
 *
 * The XFS disk error handling mechanism is not based on a typical
 * transaction abort mechanism. Logically after the filesystem
 * gets marked 'SHUTDOWN', we can't let any new transactions
 * be durable - ie. committed to disk - because some metadata might
 * be inconsistent. In such cases, this returns an error, and the
 * caller may assume that all locked objects joined to the transaction
 * have already been unlocked as if the commit had succeeded.
 * Do not reference the transaction structure after this call.
 */
static int
__xfs_trans_commit(
	struct xfs_trans	*tp,
	bool			regrant)
{
	struct xfs_mount	*mp = tp->t_mountp;
	xfs_lsn_t		commit_lsn = -1;
	int			error = 0;
	int			sync = tp->t_flags & XFS_TRANS_SYNC;

	ASSERT(!tp->t_agfl_dfops ||
	       !xfs_defer_has_unfinished_work(tp->t_agfl_dfops) || regrant);

	trace_xfs_trans_commit(tp, _RET_IP_);

	/*
	 * If there is nothing to be logged by the transaction,
	 * then unlock all of the items associated with the
	 * transaction and free the transaction structure.
	 * Also make sure to return any reserved blocks to
	 * the free pool.
	 */
	if (!(tp->t_flags & XFS_TRANS_DIRTY))
		goto out_unreserve;

	if (XFS_FORCED_SHUTDOWN(mp)) {
		error = -EIO;
		goto out_unreserve;
	}

	ASSERT(tp->t_ticket != NULL);

	/*
	 * If we need to update the superblock, then do it now.
	 */
	if (tp->t_flags & XFS_TRANS_SB_DIRTY)
		xfs_trans_apply_sb_deltas(tp);
	xfs_trans_apply_dquot_deltas(tp);

	xfs_log_commit_cil(mp, tp, &commit_lsn, regrant);

	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
	xfs_trans_free(tp);

	/*
	 * If the transaction needs to be synchronous, then force the
	 * log out now and wait for it.
	 */
	if (sync) {
		error = xfs_log_force_lsn(mp, commit_lsn, XFS_LOG_SYNC, NULL);
		XFS_STATS_INC(mp, xs_trans_sync);
	} else {
		XFS_STATS_INC(mp, xs_trans_async);
	}

	return error;

out_unreserve:
	xfs_trans_unreserve_and_mod_sb(tp);

	/*
	 * It is indeed possible for the transaction to be not dirty but
	 * the dqinfo portion to be.  All that means is that we have some
	 * (non-persistent) quota reservations that need to be unreserved.
	 */
	xfs_trans_unreserve_and_mod_dquots(tp);
	if (tp->t_ticket) {
		commit_lsn = xfs_log_done(mp, tp->t_ticket, NULL, regrant);
		if (commit_lsn == -1 && !error)
			error = -EIO;
	}
	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
	xfs_trans_free_items(tp, NULLCOMMITLSN, !!error);
	xfs_trans_free(tp);

	XFS_STATS_INC(mp, xs_trans_empty);
	return error;
}

int
xfs_trans_commit(
	struct xfs_trans	*tp)
{
	return __xfs_trans_commit(tp, false);
}
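/*
 * Illustrative note (editor's addition): callers opt in to the
 * synchronous commit path above by setting XFS_TRANS_SYNC before the
 * commit, conventionally via the xfs_trans_set_sync() helper declared
 * in xfs_trans.h (named here as an assumption, not defined in this file).
 */
#if 0	/* example only */
	xfs_trans_set_sync(tp);		/* force and wait on the log */
	error = xfs_trans_commit(tp);
#endif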
/*
 * Unlock all of the transaction's items and free the transaction.
 * The transaction must not have modified any of its items, because
 * there is no way to restore them to their previous state.
 *
 * If the transaction has made a log reservation, make sure to release
 * it as well.
 */
void
xfs_trans_cancel(
	struct xfs_trans	*tp)
{
	struct xfs_mount	*mp = tp->t_mountp;
	bool			dirty = (tp->t_flags & XFS_TRANS_DIRTY);

	trace_xfs_trans_cancel(tp, _RET_IP_);

	/*
	 * See if the caller is relying on us to shut down the
	 * filesystem.  This happens in paths where we detect
	 * corruption and decide to give up.
	 */
	if (dirty && !XFS_FORCED_SHUTDOWN(mp)) {
		XFS_ERROR_REPORT("xfs_trans_cancel", XFS_ERRLEVEL_LOW, mp);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	}
#ifdef DEBUG
	if (!dirty && !XFS_FORCED_SHUTDOWN(mp)) {
		struct xfs_log_item *lip;

		list_for_each_entry(lip, &tp->t_items, li_trans)
			ASSERT(!(lip->li_type == XFS_LI_EFD));
	}
#endif
	xfs_trans_unreserve_and_mod_sb(tp);
	xfs_trans_unreserve_and_mod_dquots(tp);

	if (tp->t_ticket) {
		xfs_log_done(mp, tp->t_ticket, NULL, false);
		tp->t_ticket = NULL;
	}

	/* mark this thread as no longer being in a transaction */
	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);

	xfs_trans_free_items(tp, NULLCOMMITLSN, dirty);
	xfs_trans_free(tp);
}
/*
 * Roll from one trans in the sequence of PERMANENT transactions to
 * the next: permanent transactions are only flushed out when
 * committed with xfs_trans_commit(), but we still want to let chunks
 * of it go to the log as soon as possible. So we commit the
 * chunk we've been working on and get a new transaction to continue.
 */
int
xfs_trans_roll(
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*trans = *tpp;
	struct xfs_trans_res	tres;
	int			error;

	trace_xfs_trans_roll(trans, _RET_IP_);

	/*
	 * Copy the critical parameters from one trans to the next.
	 */
	tres.tr_logres = trans->t_log_res;
	tres.tr_logcount = trans->t_log_count;

	*tpp = xfs_trans_dup(trans);

	/*
	 * Commit the current transaction.
	 * If this commit failed, then it'd just unlock those items that
	 * are not marked ihold. That also means that a filesystem shutdown
	 * is in progress. The caller takes the responsibility to cancel
	 * the duplicate transaction that gets returned.
	 */
	error = __xfs_trans_commit(trans, true);
	if (error)
		return error;

	/*
	 * Reserve space in the log for the next transaction.
	 * This also pushes items in the "AIL", the list of logged items,
	 * out to disk if they are taking up space at the tail of the log
	 * that we want to use.  This requires that either nothing be locked
	 * across this call, or that anything that is locked be logged in
	 * the prior and the next transactions.
	 */
	tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
	return xfs_trans_reserve(*tpp, &tres, 0, 0);
}