/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * Copyright (C) 2010 Red Hat, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
21 #include "xfs_shared.h"
22 #include "xfs_format.h"
23 #include "xfs_log_format.h"
24 #include "xfs_trans_resv.h"
27 #include "xfs_mount.h"
28 #include "xfs_inode.h"
29 #include "xfs_extent_busy.h"
30 #include "xfs_quota.h"
31 #include "xfs_trans.h"
32 #include "xfs_trans_priv.h"
34 #include "xfs_trace.h"
35 #include "xfs_error.h"
kmem_zone_t	*xfs_trans_zone;
kmem_zone_t	*xfs_log_item_desc_zone;
/*
 * Initialize the precomputed transaction reservation values
 * in the mount structure.
 */
void
xfs_trans_init(
	struct xfs_mount	*mp)
{
	xfs_trans_resv_calc(mp, M_RES(mp));
}
/*
 * This routine is called to allocate a transaction structure.
 * The type parameter indicates the type of the transaction.  These
 * are enumerated in xfs_trans.h.
 *
 * Dynamically allocate the transaction structure from the transaction
 * zone, initialize it, and return it to the caller.
 */
xfs_trans_t *
xfs_trans_alloc(
	xfs_mount_t	*mp,
	uint		type)
{
	xfs_trans_t	*tp;

	sb_start_intwrite(mp->m_super);
	tp = _xfs_trans_alloc(mp, type, KM_SLEEP);
	tp->t_flags |= XFS_TRANS_FREEZE_PROT;
	return tp;
}
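/*
 * Editor's note: an illustrative caller sketch, not part of the original
 * file.  The transaction type and the precomputed reservation named below
 * are assumptions for illustration; real callers pick the pair that
 * matches their operation:
 *
 *	tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
 *	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ichange, 0, 0);
 *	if (error) {
 *		xfs_trans_cancel(tp, 0);
 *		return error;
 *	}
 *	...join, lock and log items...
 *	return xfs_trans_commit(tp, 0);
 */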
xfs_trans_t *
_xfs_trans_alloc(
	xfs_mount_t	*mp,
	uint		type,
	xfs_km_flags_t	memflags)
{
	xfs_trans_t	*tp;

	WARN_ON(mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
	atomic_inc(&mp->m_active_trans);

	tp = kmem_zone_zalloc(xfs_trans_zone, memflags);
	tp->t_magic = XFS_TRANS_HEADER_MAGIC;
	tp->t_type = type;
	tp->t_mountp = mp;
	INIT_LIST_HEAD(&tp->t_items);
	INIT_LIST_HEAD(&tp->t_busy);

	return tp;
}
/*
 * Free the transaction structure.  If there is more clean up
 * to do when the structure is freed, add it here.
 */
STATIC void
xfs_trans_free(
	struct xfs_trans	*tp)
{
	xfs_extent_busy_sort(&tp->t_busy);
	xfs_extent_busy_clear(tp->t_mountp, &tp->t_busy, false);

	atomic_dec(&tp->t_mountp->m_active_trans);
	if (tp->t_flags & XFS_TRANS_FREEZE_PROT)
		sb_end_intwrite(tp->t_mountp->m_super);
	xfs_trans_free_dqinfo(tp);
	kmem_zone_free(xfs_trans_zone, tp);
}
/*
 * This is called to create a new transaction which will share the
 * permanent log reservation of the given transaction.  The remaining
 * unused block and rt extent reservations are also inherited.  This
 * implies that the original transaction is no longer allowed to allocate
 * blocks.  Locks and log items, however, are not inherited.  They must
 * be added to the new transaction explicitly.
 */
xfs_trans_t *
xfs_trans_dup(
	xfs_trans_t	*tp)
{
	xfs_trans_t	*ntp;

	ntp = kmem_zone_zalloc(xfs_trans_zone, KM_SLEEP);

	/*
	 * Initialize the new transaction structure.
	 */
	ntp->t_magic = XFS_TRANS_HEADER_MAGIC;
	ntp->t_type = tp->t_type;
	ntp->t_mountp = tp->t_mountp;
	INIT_LIST_HEAD(&ntp->t_items);
	INIT_LIST_HEAD(&ntp->t_busy);

	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(tp->t_ticket != NULL);

	ntp->t_flags = XFS_TRANS_PERM_LOG_RES |
		       (tp->t_flags & XFS_TRANS_RESERVE) |
		       (tp->t_flags & XFS_TRANS_FREEZE_PROT);
	/* We gave our writer reference to the new transaction */
	tp->t_flags &= ~XFS_TRANS_FREEZE_PROT;
	ntp->t_ticket = xfs_log_ticket_get(tp->t_ticket);
	ntp->t_blk_res = tp->t_blk_res - tp->t_blk_res_used;
	tp->t_blk_res = tp->t_blk_res_used;
	ntp->t_rtx_res = tp->t_rtx_res - tp->t_rtx_res_used;
	tp->t_rtx_res = tp->t_rtx_res_used;
	ntp->t_pflags = tp->t_pflags;

	xfs_trans_dup_dqinfo(tp, ntp);

	atomic_inc(&tp->t_mountp->m_active_trans);
	return ntp;
}
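/*
 * Editor's note: an illustrative sketch of the dup/commit hand-off, not
 * part of the original file; xfs_trans_roll() below is the canonical
 * user.  The extra ticket reference taken via xfs_log_ticket_get() above
 * must be dropped once the old transaction has committed:
 *
 *	ntp = xfs_trans_dup(tp);
 *	error = xfs_trans_commit(tp, 0);
 *	...
 *	xfs_log_ticket_put(ntp->t_ticket);
 */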
/*
 * This is called to reserve free disk blocks and log space for the
 * given transaction.  This must be done before allocating any resources
 * within the transaction.
 *
 * This will return ENOSPC if there are not enough blocks available.
 * It will sleep waiting for available log space.
 * The only valid value for the flags parameter is XFS_RES_LOG_PERM, which
 * is used by long running transactions.  If any one of the reservations
 * fails then they will all be backed out.
 *
 * This does not do quota reservations. That typically is done by the
 * caller afterwards.
 */
int
xfs_trans_reserve(
	struct xfs_trans	*tp,
	struct xfs_trans_res	*resp,
	uint			blocks,
	uint			rtextents)
{
	int		error = 0;
	int		rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;

	/* Mark this thread as being in a transaction */
	current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);

	/*
	 * Attempt to reserve the needed disk blocks by decrementing
	 * the number needed from the number available.  This will
	 * fail if the count would go below zero.
	 */
	if (blocks > 0) {
		error = xfs_icsb_modify_counters(tp->t_mountp, XFS_SBS_FDBLOCKS,
					  -((int64_t)blocks), rsvd);
		if (error != 0) {
			current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
			return XFS_ERROR(ENOSPC);
		}
		tp->t_blk_res += blocks;
	}

	/*
	 * Reserve the log space needed for this transaction.
	 */
	if (resp->tr_logres > 0) {
		bool	permanent = false;

		ASSERT(tp->t_log_res == 0 ||
		       tp->t_log_res == resp->tr_logres);
		ASSERT(tp->t_log_count == 0 ||
		       tp->t_log_count == resp->tr_logcount);

		if (resp->tr_logflags & XFS_TRANS_PERM_LOG_RES) {
			tp->t_flags |= XFS_TRANS_PERM_LOG_RES;
			permanent = true;
		} else {
			ASSERT(tp->t_ticket == NULL);
			ASSERT(!(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
		}

		if (tp->t_ticket != NULL) {
			ASSERT(resp->tr_logflags & XFS_TRANS_PERM_LOG_RES);
			error = xfs_log_regrant(tp->t_mountp, tp->t_ticket);
		} else {
			error = xfs_log_reserve(tp->t_mountp,
						resp->tr_logres,
						resp->tr_logcount,
						&tp->t_ticket, XFS_TRANSACTION,
						permanent, tp->t_type);
		}

		if (error)
			goto undo_blocks;

		tp->t_log_res = resp->tr_logres;
		tp->t_log_count = resp->tr_logcount;
	}

	/*
	 * Attempt to reserve the needed realtime extents by decrementing
	 * the number needed from the number available.  This will
	 * fail if the count would go below zero.
	 */
	if (rtextents > 0) {
		error = xfs_mod_incore_sb(tp->t_mountp, XFS_SBS_FREXTENTS,
					  -((int64_t)rtextents), rsvd);
		if (error) {
			error = XFS_ERROR(ENOSPC);
			goto undo_log;
		}
		tp->t_rtx_res += rtextents;
	}

	return 0;

	/*
	 * Error cases jump to one of these labels to undo any
	 * reservations which have already been performed.
	 */
undo_log:
	if (resp->tr_logres > 0) {
		int		log_flags;

		if (resp->tr_logflags & XFS_TRANS_PERM_LOG_RES) {
			log_flags = XFS_LOG_REL_PERM_RESERV;
		} else {
			log_flags = 0;
		}
		xfs_log_done(tp->t_mountp, tp->t_ticket, NULL, log_flags);
		tp->t_ticket = NULL;
		tp->t_log_res = 0;
		tp->t_flags &= ~XFS_TRANS_PERM_LOG_RES;
	}

undo_blocks:
	if (blocks > 0) {
		xfs_icsb_modify_counters(tp->t_mountp, XFS_SBS_FDBLOCKS,
					 (int64_t)blocks, rsvd);
		tp->t_blk_res = 0;
	}

	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);

	return error;
}
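/*
 * Editor's note: illustrative only, not from this file.  A write path
 * would typically combine a precomputed log reservation with a disk
 * block reservation in a single call (names assumed from the reservation
 * tables built by xfs_trans_resv_calc()):
 *
 *	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write, resblks, 0);
 *
 * On failure all partial reservations have been backed out, so the
 * caller only needs to cancel the transaction.
 */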
/*
 * Record the indicated change to the given field for application
 * to the file system's superblock when the transaction commits.
 * For now, just store the change in the transaction structure.
 *
 * Mark the transaction structure to indicate that the superblock
 * needs to be updated before committing.
 *
 * Because we may not be keeping track of allocated/free inodes and
 * used filesystem blocks in the superblock, we do not mark the
 * superblock dirty in this transaction if we modify these fields.
 * We still need to update the transaction deltas so that they get
 * applied to the incore superblock, but we don't want them to
 * cause the superblock to get locked and logged if these are the
 * only fields in the superblock that the transaction modifies.
 */
void
xfs_trans_mod_sb(
	xfs_trans_t	*tp,
	uint		field,
	int64_t		delta)
{
	uint32_t	flags = (XFS_TRANS_DIRTY|XFS_TRANS_SB_DIRTY);
	xfs_mount_t	*mp = tp->t_mountp;

	switch (field) {
	case XFS_TRANS_SB_ICOUNT:
		tp->t_icount_delta += delta;
		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_IFREE:
		tp->t_ifree_delta += delta;
		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_FDBLOCKS:
		/*
		 * Track the number of blocks allocated in the
		 * transaction.  Make sure it does not exceed the
		 * number reserved.
		 */
		if (delta < 0) {
			tp->t_blk_res_used += (uint)-delta;
			ASSERT(tp->t_blk_res_used <= tp->t_blk_res);
		}
		tp->t_fdblocks_delta += delta;
		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_RES_FDBLOCKS:
		/*
		 * The allocation has already been applied to the
		 * in-core superblock's counter.  This should only
		 * be applied to the on-disk superblock.
		 */
		ASSERT(delta < 0);
		tp->t_res_fdblocks_delta += delta;
		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_FREXTENTS:
		/*
		 * Track the number of realtime extents allocated in the
		 * transaction.  Make sure it does not exceed the
		 * number reserved.
		 */
		if (delta < 0) {
			tp->t_rtx_res_used += (uint)-delta;
			ASSERT(tp->t_rtx_res_used <= tp->t_rtx_res);
		}
		tp->t_frextents_delta += delta;
		break;
	case XFS_TRANS_SB_RES_FREXTENTS:
		/*
		 * The allocation has already been applied to the
		 * in-core superblock's counter.  This should only
		 * be applied to the on-disk superblock.
		 */
		ASSERT(delta < 0);
		tp->t_res_frextents_delta += delta;
		break;
	case XFS_TRANS_SB_DBLOCKS:
		ASSERT(delta > 0);
		tp->t_dblocks_delta += delta;
		break;
	case XFS_TRANS_SB_AGCOUNT:
		ASSERT(delta > 0);
		tp->t_agcount_delta += delta;
		break;
	case XFS_TRANS_SB_IMAXPCT:
		tp->t_imaxpct_delta += delta;
		break;
	case XFS_TRANS_SB_REXTSIZE:
		tp->t_rextsize_delta += delta;
		break;
	case XFS_TRANS_SB_RBMBLOCKS:
		tp->t_rbmblocks_delta += delta;
		break;
	case XFS_TRANS_SB_RBLOCKS:
		tp->t_rblocks_delta += delta;
		break;
	case XFS_TRANS_SB_REXTENTS:
		tp->t_rextents_delta += delta;
		break;
	case XFS_TRANS_SB_REXTSLOG:
		tp->t_rextslog_delta += delta;
		break;
	default:
		ASSERT(0);
		return;
	}

	tp->t_flags |= flags;
}
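/*
 * Editor's note: an illustrative call, not from this file.  An extent
 * allocation of "len" blocks charged against the transaction's block
 * reservation is recorded with a negative delta:
 *
 *	xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, -(int64_t)len);
 *
 * With lazy superblock counters enabled this leaves XFS_TRANS_SB_DIRTY
 * clear, so only the incore counter is updated at commit and the on-disk
 * superblock is not logged for it.
 */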
/*
 * xfs_trans_apply_sb_deltas() is called from the commit code
 * to bring the superblock buffer into the current transaction
 * and modify it as requested by earlier calls to xfs_trans_mod_sb().
 *
 * For now we just look at each field allowed to change and change
 * it if necessary.
 */
STATIC void
xfs_trans_apply_sb_deltas(
	xfs_trans_t	*tp)
{
	xfs_dsb_t	*sbp;
	xfs_buf_t	*bp;
	int		whole = 0;

	bp = xfs_trans_getsb(tp, tp->t_mountp, 0);
	sbp = XFS_BUF_TO_SBP(bp);

	/*
	 * Check that superblock mods match the mods made to AGF counters.
	 */
	ASSERT((tp->t_fdblocks_delta + tp->t_res_fdblocks_delta) ==
	       (tp->t_ag_freeblks_delta + tp->t_ag_flist_delta +
		tp->t_ag_btree_delta));

	/*
	 * Only update the superblock counters if we are logging them
	 */
	if (!xfs_sb_version_haslazysbcount(&(tp->t_mountp->m_sb))) {
		if (tp->t_icount_delta)
			be64_add_cpu(&sbp->sb_icount, tp->t_icount_delta);
		if (tp->t_ifree_delta)
			be64_add_cpu(&sbp->sb_ifree, tp->t_ifree_delta);
		if (tp->t_fdblocks_delta)
			be64_add_cpu(&sbp->sb_fdblocks, tp->t_fdblocks_delta);
		if (tp->t_res_fdblocks_delta)
			be64_add_cpu(&sbp->sb_fdblocks, tp->t_res_fdblocks_delta);
	}

	if (tp->t_frextents_delta)
		be64_add_cpu(&sbp->sb_frextents, tp->t_frextents_delta);
	if (tp->t_res_frextents_delta)
		be64_add_cpu(&sbp->sb_frextents, tp->t_res_frextents_delta);

	if (tp->t_dblocks_delta) {
		be64_add_cpu(&sbp->sb_dblocks, tp->t_dblocks_delta);
		whole = 1;
	}
	if (tp->t_agcount_delta) {
		be32_add_cpu(&sbp->sb_agcount, tp->t_agcount_delta);
		whole = 1;
	}
	if (tp->t_imaxpct_delta) {
		sbp->sb_imax_pct += tp->t_imaxpct_delta;
		whole = 1;
	}
	if (tp->t_rextsize_delta) {
		be32_add_cpu(&sbp->sb_rextsize, tp->t_rextsize_delta);
		whole = 1;
	}
	if (tp->t_rbmblocks_delta) {
		be32_add_cpu(&sbp->sb_rbmblocks, tp->t_rbmblocks_delta);
		whole = 1;
	}
	if (tp->t_rblocks_delta) {
		be64_add_cpu(&sbp->sb_rblocks, tp->t_rblocks_delta);
		whole = 1;
	}
	if (tp->t_rextents_delta) {
		be64_add_cpu(&sbp->sb_rextents, tp->t_rextents_delta);
		whole = 1;
	}
	if (tp->t_rextslog_delta) {
		sbp->sb_rextslog += tp->t_rextslog_delta;
		whole = 1;
	}

	if (whole)
		/*
		 * Log the whole thing, the fields are noncontiguous.
		 */
		xfs_trans_log_buf(tp, bp, 0, sizeof(xfs_dsb_t) - 1);
	else
		/*
		 * Since all the modifiable fields are contiguous, we
		 * can get away with this.
		 */
		xfs_trans_log_buf(tp, bp, offsetof(xfs_dsb_t, sb_icount),
				  offsetof(xfs_dsb_t, sb_frextents) +
				  sizeof(sbp->sb_frextents) - 1);
}
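/*
 * Editor's note (assumption made explicit, not in the original): the
 * "contiguous fields" branch above logs the single byte range from
 * sb_icount through sb_frextents, which relies on those four __be64
 * counters being adjacent in xfs_dsb_t.  A compile-time check in the
 * spirit of that assumption could read:
 *
 *	BUILD_BUG_ON(offsetof(xfs_dsb_t, sb_frextents) -
 *		     offsetof(xfs_dsb_t, sb_icount) != 3 * sizeof(__be64));
 */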
/*
 * xfs_trans_unreserve_and_mod_sb() is called to release unused reservations
 * and apply superblock counter changes to the in-core superblock.  The
 * t_res_fdblocks_delta and t_res_frextents_delta fields are explicitly NOT
 * applied to the in-core superblock.  The idea is that it has already been
 * done.
 *
 * This is done efficiently with a single call to xfs_mod_incore_sb_batch().
 * However, we have to ensure that we modify each superblock field only
 * once because the application of the delta values may not be atomic. That can
 * lead to ENOSPC races occurring if we have two separate modifications of the
 * free space counter to put back the entire reservation and then take away
 * what we used.
 *
 * If we are not logging superblock counters, then the inode allocated/free and
 * used block counts are not updated in the on disk superblock. In this case,
 * XFS_TRANS_SB_DIRTY will not be set when the transaction is updated but we
 * still need to update the incore superblock with the changes.
 */
void
xfs_trans_unreserve_and_mod_sb(
	xfs_trans_t	*tp)
{
	xfs_mod_sb_t	msb[9];	/* If you add cases, add entries */
	xfs_mod_sb_t	*msbp;
	xfs_mount_t	*mp = tp->t_mountp;
	/* REFERENCED */
	int		error;
	int		rsvd;
	int64_t		blkdelta = 0;
	int64_t		rtxdelta = 0;
	int64_t		idelta = 0;
	int64_t		ifreedelta = 0;

	msbp = msb;
	rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;

	/* calculate deltas */
	if (tp->t_blk_res > 0)
		blkdelta = tp->t_blk_res;
	if ((tp->t_fdblocks_delta != 0) &&
	    (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
	     (tp->t_flags & XFS_TRANS_SB_DIRTY)))
		blkdelta += tp->t_fdblocks_delta;

	if (tp->t_rtx_res > 0)
		rtxdelta = tp->t_rtx_res;
	if ((tp->t_frextents_delta != 0) &&
	    (tp->t_flags & XFS_TRANS_SB_DIRTY))
		rtxdelta += tp->t_frextents_delta;

	if (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
	     (tp->t_flags & XFS_TRANS_SB_DIRTY)) {
		idelta = tp->t_icount_delta;
		ifreedelta = tp->t_ifree_delta;
	}

	/* apply the per-cpu counters */
	if (blkdelta) {
		error = xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
						 blkdelta, rsvd);
		if (error)
			goto out;
	}

	if (idelta) {
		error = xfs_icsb_modify_counters(mp, XFS_SBS_ICOUNT,
						 idelta, rsvd);
		if (error)
			goto out_undo_fdblocks;
	}

	if (ifreedelta) {
		error = xfs_icsb_modify_counters(mp, XFS_SBS_IFREE,
						 ifreedelta, rsvd);
		if (error)
			goto out_undo_icount;
	}

	/* apply remaining deltas */
	if (rtxdelta != 0) {
		msbp->msb_field = XFS_SBS_FREXTENTS;
		msbp->msb_delta = rtxdelta;
		msbp++;
	}

	if (tp->t_flags & XFS_TRANS_SB_DIRTY) {
		if (tp->t_dblocks_delta != 0) {
			msbp->msb_field = XFS_SBS_DBLOCKS;
			msbp->msb_delta = tp->t_dblocks_delta;
			msbp++;
		}
		if (tp->t_agcount_delta != 0) {
			msbp->msb_field = XFS_SBS_AGCOUNT;
			msbp->msb_delta = tp->t_agcount_delta;
			msbp++;
		}
		if (tp->t_imaxpct_delta != 0) {
			msbp->msb_field = XFS_SBS_IMAX_PCT;
			msbp->msb_delta = tp->t_imaxpct_delta;
			msbp++;
		}
		if (tp->t_rextsize_delta != 0) {
			msbp->msb_field = XFS_SBS_REXTSIZE;
			msbp->msb_delta = tp->t_rextsize_delta;
			msbp++;
		}
		if (tp->t_rbmblocks_delta != 0) {
			msbp->msb_field = XFS_SBS_RBMBLOCKS;
			msbp->msb_delta = tp->t_rbmblocks_delta;
			msbp++;
		}
		if (tp->t_rblocks_delta != 0) {
			msbp->msb_field = XFS_SBS_RBLOCKS;
			msbp->msb_delta = tp->t_rblocks_delta;
			msbp++;
		}
		if (tp->t_rextents_delta != 0) {
			msbp->msb_field = XFS_SBS_REXTENTS;
			msbp->msb_delta = tp->t_rextents_delta;
			msbp++;
		}
		if (tp->t_rextslog_delta != 0) {
			msbp->msb_field = XFS_SBS_REXTSLOG;
			msbp->msb_delta = tp->t_rextslog_delta;
			msbp++;
		}
	}

	/*
	 * If we need to change anything, do it.
	 */
	if (msbp > msb) {
		error = xfs_mod_incore_sb_batch(tp->t_mountp, msb,
			(uint)(msbp - msb), rsvd);
		if (error)
			goto out_undo_ifreecount;
	}

	return;

out_undo_ifreecount:
	if (ifreedelta)
		xfs_icsb_modify_counters(mp, XFS_SBS_IFREE, -ifreedelta, rsvd);
out_undo_icount:
	if (idelta)
		xfs_icsb_modify_counters(mp, XFS_SBS_ICOUNT, -idelta, rsvd);
out_undo_fdblocks:
	if (blkdelta)
		xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS, -blkdelta, rsvd);
out:
	ASSERT(error == 0);
	return;
}
/*
 * Add the given log item to the transaction's list of log items.
 *
 * The log item will now point to its new descriptor with its li_desc field.
 */
void
xfs_trans_add_item(
	struct xfs_trans	*tp,
	struct xfs_log_item	*lip)
{
	struct xfs_log_item_desc *lidp;

	ASSERT(lip->li_mountp == tp->t_mountp);
	ASSERT(lip->li_ailp == tp->t_mountp->m_ail);

	lidp = kmem_zone_zalloc(xfs_log_item_desc_zone, KM_SLEEP | KM_NOFS);

	lidp->lid_item = lip;
	lidp->lid_flags = 0;
	list_add_tail(&lidp->lid_trans, &tp->t_items);

	lip->li_desc = lidp;
}
STATIC void
xfs_trans_free_item_desc(
	struct xfs_log_item_desc *lidp)
{
	list_del_init(&lidp->lid_trans);
	kmem_zone_free(xfs_log_item_desc_zone, lidp);
}

/*
 * Unlink and free the given descriptor.
 */
void
xfs_trans_del_item(
	struct xfs_log_item	*lip)
{
	xfs_trans_free_item_desc(lip->li_desc);
	lip->li_desc = NULL;
}
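/*
 * Editor's note: illustrative pairing, not from this file.  Callers do
 * not normally use these helpers directly; the typed join helpers do,
 * e.g. xfs_trans_ijoin() reaches xfs_trans_add_item() with the inode's
 * log item, and the commit/cancel paths drop descriptors through
 * xfs_trans_free_items()/xfs_trans_del_item():
 *
 *	xfs_trans_add_item(tp, &iip->ili_item);
 *	...
 *	xfs_trans_del_item(&iip->ili_item);
 */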
/*
 * Unlock all of the items of a transaction and free all the descriptors
 * of that transaction.
 */
void
xfs_trans_free_items(
	struct xfs_trans	*tp,
	xfs_lsn_t		commit_lsn,
	int			flags)
{
	struct xfs_log_item_desc *lidp, *next;

	list_for_each_entry_safe(lidp, next, &tp->t_items, lid_trans) {
		struct xfs_log_item	*lip = lidp->lid_item;

		lip->li_desc = NULL;

		if (commit_lsn != NULLCOMMITLSN)
			lip->li_ops->iop_committing(lip, commit_lsn);
		if (flags & XFS_TRANS_ABORT)
			lip->li_flags |= XFS_LI_ABORTED;
		lip->li_ops->iop_unlock(lip);

		xfs_trans_free_item_desc(lidp);
	}
}
static inline void
xfs_log_item_batch_insert(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	struct xfs_log_item	**log_items,
	int			nr_items,
	xfs_lsn_t		commit_lsn)
{
	int	i;

	spin_lock(&ailp->xa_lock);
	/* xfs_trans_ail_update_bulk drops ailp->xa_lock */
	xfs_trans_ail_update_bulk(ailp, cur, log_items, nr_items, commit_lsn);

	for (i = 0; i < nr_items; i++) {
		struct xfs_log_item *lip = log_items[i];

		lip->li_ops->iop_unpin(lip, 0);
	}
}
/*
 * Bulk operation version of xfs_trans_committed that takes a log vector of
 * items to insert into the AIL. This uses bulk AIL insertion techniques to
 * minimise lock traffic.
 *
 * If we are called with the aborted flag set, it is because a log write during
 * a CIL checkpoint commit has failed. In this case, all the items in the
 * checkpoint have already gone through iop_committed and iop_unlock, which
 * means that checkpoint commit abort handling is treated exactly the same
 * as an iclog write error even though we haven't started any IO yet. Hence in
 * this case all we need to do is iop_committed processing, followed by an
 * iop_unpin(aborted) call.
 *
 * The AIL cursor is used to optimise the insert process. If commit_lsn is not
 * at the end of the AIL, the insert cursor avoids the need to walk
 * the AIL to find the insertion point on every xfs_log_item_batch_insert()
 * call. This saves a lot of needless list walking and is a net win, even
 * though it slightly increases the amount of AIL lock traffic to set it up
 * and tear it down.
 */
void
xfs_trans_committed_bulk(
	struct xfs_ail		*ailp,
	struct xfs_log_vec	*log_vector,
	xfs_lsn_t		commit_lsn,
	int			aborted)
{
#define LOG_ITEM_BATCH_SIZE	32
	struct xfs_log_item	*log_items[LOG_ITEM_BATCH_SIZE];
	struct xfs_log_vec	*lv;
	struct xfs_ail_cursor	cur;
	int			i = 0;

	spin_lock(&ailp->xa_lock);
	xfs_trans_ail_cursor_last(ailp, &cur, commit_lsn);
	spin_unlock(&ailp->xa_lock);

	/* unpin all the log items */
	for (lv = log_vector; lv; lv = lv->lv_next) {
		struct xfs_log_item	*lip = lv->lv_item;
		xfs_lsn_t		item_lsn;

		if (aborted)
			lip->li_flags |= XFS_LI_ABORTED;
		item_lsn = lip->li_ops->iop_committed(lip, commit_lsn);

		/* item_lsn of -1 means the item needs no further processing */
		if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0)
			continue;

		/*
		 * if we are aborting the operation, no point in inserting the
		 * object into the AIL as we are in a shutdown situation.
		 */
		if (aborted) {
			ASSERT(XFS_FORCED_SHUTDOWN(ailp->xa_mount));
			lip->li_ops->iop_unpin(lip, 1);
			continue;
		}

		if (item_lsn != commit_lsn) {

			/*
			 * Not a bulk update option due to unusual item_lsn.
			 * Push into AIL immediately, rechecking the lsn once
			 * we have the ail lock. Then unpin the item. This does
			 * not affect the AIL cursor the bulk insert path is
			 * using.
			 */
			spin_lock(&ailp->xa_lock);
			if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0)
				xfs_trans_ail_update(ailp, lip, item_lsn);
			else
				spin_unlock(&ailp->xa_lock);
			lip->li_ops->iop_unpin(lip, 0);
			continue;
		}

		/* Item is a candidate for bulk AIL insert.  */
		log_items[i++] = lv->lv_item;
		if (i >= LOG_ITEM_BATCH_SIZE) {
			xfs_log_item_batch_insert(ailp, &cur, log_items,
					LOG_ITEM_BATCH_SIZE, commit_lsn);
			i = 0;
		}
	}

	/* make sure we insert the remainder! */
	if (i)
		xfs_log_item_batch_insert(ailp, &cur, log_items, i, commit_lsn);

	spin_lock(&ailp->xa_lock);
	xfs_trans_ail_cursor_done(&cur);
	spin_unlock(&ailp->xa_lock);
}
/*
 * Commit the given transaction to the log.
 *
 * The XFS disk error handling mechanism is not based on a typical
 * transaction abort mechanism. Logically after the filesystem
 * gets marked 'SHUTDOWN', we can't let any new transactions
 * be durable - ie. committed to disk - because some metadata might
 * be inconsistent. In such cases, this returns an error, and the
 * caller may assume that all locked objects joined to the transaction
 * have already been unlocked as if the commit had succeeded.
 * Do not reference the transaction structure after this call.
 */
int
xfs_trans_commit(
	struct xfs_trans	*tp,
	uint			flags)
{
	struct xfs_mount	*mp = tp->t_mountp;
	xfs_lsn_t		commit_lsn = -1;
	int			error = 0;
	int			log_flags = 0;
	int			sync = tp->t_flags & XFS_TRANS_SYNC;

	/*
	 * Determine whether this commit is releasing a permanent
	 * log reservation or not.
	 */
	if (flags & XFS_TRANS_RELEASE_LOG_RES) {
		ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
		log_flags = XFS_LOG_REL_PERM_RESERV;
	}

	/*
	 * If there is nothing to be logged by the transaction,
	 * then unlock all of the items associated with the
	 * transaction and free the transaction structure.
	 * Also make sure to return any reserved blocks to
	 * the free pool.
	 */
	if (!(tp->t_flags & XFS_TRANS_DIRTY))
		goto out_unreserve;

	if (XFS_FORCED_SHUTDOWN(mp)) {
		error = XFS_ERROR(EIO);
		goto out_unreserve;
	}

	ASSERT(tp->t_ticket != NULL);

	/*
	 * If we need to update the superblock, then do it now.
	 */
	if (tp->t_flags & XFS_TRANS_SB_DIRTY)
		xfs_trans_apply_sb_deltas(tp);
	xfs_trans_apply_dquot_deltas(tp);

	xfs_log_commit_cil(mp, tp, &commit_lsn, flags);

	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
	xfs_trans_free(tp);

	/*
	 * If the transaction needs to be synchronous, then force the
	 * log out now and wait for it.
	 */
	if (sync) {
		error = _xfs_log_force_lsn(mp, commit_lsn, XFS_LOG_SYNC, NULL);
		XFS_STATS_INC(xs_trans_sync);
	} else {
		XFS_STATS_INC(xs_trans_async);
	}

	return error;

out_unreserve:
	xfs_trans_unreserve_and_mod_sb(tp);

	/*
	 * It is indeed possible for the transaction to be not dirty but
	 * the dqinfo portion to be.  All that means is that we have some
	 * (non-persistent) quota reservations that need to be unreserved.
	 */
	xfs_trans_unreserve_and_mod_dquots(tp);
	if (tp->t_ticket) {
		commit_lsn = xfs_log_done(mp, tp->t_ticket, NULL, log_flags);
		if (commit_lsn == -1 && !error)
			error = XFS_ERROR(EIO);
	}
	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
	xfs_trans_free_items(tp, NULLCOMMITLSN, error ? XFS_TRANS_ABORT : 0);
	xfs_trans_free(tp);

	XFS_STATS_INC(xs_trans_empty);
	return error;
}
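/*
 * Editor's note: an illustrative commit of a permanent transaction, not
 * from this file.  XFS_TRANS_RELEASE_LOG_RES releases the permanent log
 * reservation taken by xfs_trans_reserve():
 *
 *	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
 *
 * On error the filesystem is shutting down and all joined items have
 * already been unlocked; the transaction must not be referenced again.
 */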
/*
 * Unlock all of the transaction's items and free the transaction.
 * The transaction must not have modified any of its items, because
 * there is no way to restore them to their previous state.
 *
 * If the transaction has made a log reservation, make sure to release
 * it as well.
 */
void
xfs_trans_cancel(
	xfs_trans_t	*tp,
	int		flags)
{
	int		log_flags;
	xfs_mount_t	*mp = tp->t_mountp;

	/*
	 * See if the caller is being too lazy to figure out if
	 * the transaction really needs an abort.
	 */
	if ((flags & XFS_TRANS_ABORT) && !(tp->t_flags & XFS_TRANS_DIRTY))
		flags &= ~XFS_TRANS_ABORT;
	/*
	 * See if the caller is relying on us to shut down the
	 * filesystem.  This happens in paths where we detect
	 * corruption and decide to give up.
	 */
	if ((tp->t_flags & XFS_TRANS_DIRTY) && !XFS_FORCED_SHUTDOWN(mp)) {
		XFS_ERROR_REPORT("xfs_trans_cancel", XFS_ERRLEVEL_LOW, mp);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	}
#ifdef DEBUG
	if (!(flags & XFS_TRANS_ABORT) && !XFS_FORCED_SHUTDOWN(mp)) {
		struct xfs_log_item_desc *lidp;

		list_for_each_entry(lidp, &tp->t_items, lid_trans)
			ASSERT(!(lidp->lid_item->li_type == XFS_LI_EFD));
	}
#endif
	xfs_trans_unreserve_and_mod_sb(tp);
	xfs_trans_unreserve_and_mod_dquots(tp);

	if (tp->t_ticket) {
		if (flags & XFS_TRANS_RELEASE_LOG_RES) {
			ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
			log_flags = XFS_LOG_REL_PERM_RESERV;
		} else {
			log_flags = 0;
		}
		xfs_log_done(mp, tp->t_ticket, NULL, log_flags);
	}

	/* mark this thread as no longer being in a transaction */
	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);

	xfs_trans_free_items(tp, NULLCOMMITLSN, flags);
	xfs_trans_free(tp);
}
/*
 * Roll from one trans in the sequence of PERMANENT transactions to
 * the next: permanent transactions are only flushed out when
 * committed with XFS_TRANS_RELEASE_LOG_RES, but we still want as soon
 * as possible to let chunks of it go to the log. So we commit the
 * chunk we've been working on and get a new transaction to continue.
 */
int
xfs_trans_roll(
	struct xfs_trans	**tpp,
	struct xfs_inode	*dp)
{
	struct xfs_trans	*trans;
	struct xfs_trans_res	tres;
	int			error;

	/*
	 * Ensure that the inode is always logged.
	 */
	trans = *tpp;
	xfs_trans_log_inode(trans, dp, XFS_ILOG_CORE);

	/*
	 * Copy the critical parameters from one trans to the next.
	 */
	tres.tr_logres = trans->t_log_res;
	tres.tr_logcount = trans->t_log_count;
	*tpp = xfs_trans_dup(trans);

	/*
	 * Commit the current transaction.
	 * If this commit failed, then it'd just unlock those items that
	 * are not marked ihold. That also means that a filesystem shutdown
	 * is in progress. The caller takes the responsibility to cancel
	 * the duplicate transaction that gets returned.
	 */
	error = xfs_trans_commit(trans, 0);
	if (error)
		return error;

	trans = *tpp;

	/*
	 * transaction commit worked ok so we can drop the extra ticket
	 * reference that we gained in xfs_trans_dup()
	 */
	xfs_log_ticket_put(trans->t_ticket);

	/*
	 * Reserve space in the log for the next transaction.
	 * This also pushes items in the "AIL", the list of logged items,
	 * out to disk if they are taking up space at the tail of the log
	 * that we want to use.  This requires that either nothing be locked
	 * across this call, or that anything that is locked be logged in
	 * the prior and the next transactions.
	 */
	tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
	error = xfs_trans_reserve(trans, &tres, 0, 0);
	/*
	 * Ensure that the inode is in the new transaction and locked.
	 */
	if (error)
		return error;

	xfs_trans_ijoin(trans, dp, 0);
	return 0;
}
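/*
 * Editor's note: an illustrative rolling loop, not from this file.  A
 * long-running operation keeps the inode locked across each roll and is
 * re-joined to the fresh transaction by xfs_trans_roll():
 *
 *	while (!done) {
 *		...log a bounded chunk of changes to tp...
 *		error = xfs_trans_roll(&tp, ip);
 *		if (error)
 *			break;
 *	}
 */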