/*
 * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Further, this software is distributed without any warranty that it is
 * free of the rightful claim of any third person regarding infringement
 * or the like.  Any license provided herein, whether implied or
 * otherwise, applies only to this software file.  Patent licenses, if
 * any, provided herein do not apply to combinations of this program with
 * other software, or any other product whatsoever.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
 * Mountain View, CA 94043, or:
 *
 * http://www.sgi.com
 *
 * For further information regarding this notice, see:
 *
 * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
 */
/*
 * High level interface routines for log manager
 */
#include "xfs_macros.h"
#include "xfs_types.h"
#include "xfs_trans.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_error.h"
#include "xfs_log_priv.h"
#include "xfs_buf_item.h"
#include "xfs_alloc_btree.h"
#include "xfs_log_recover.h"
#include "xfs_trans_priv.h"
#define xlog_write_adv_cnt(ptr, len, off, bytes) \
        { (ptr) += (bytes); \
          (len) -= (bytes); \
          (off) += (bytes);}
/* Local miscellaneous function prototypes */
STATIC int       xlog_bdstrat_cb(struct xfs_buf *);
STATIC int       xlog_commit_record(xfs_mount_t *mp, xlog_ticket_t *ticket,
                                    xlog_in_core_t **, xfs_lsn_t *);
STATIC xlog_t   *xlog_alloc_log(xfs_mount_t     *mp,
                                dev_t           log_dev,
                                xfs_daddr_t     blk_offset,
                                int             num_bblks);
STATIC int       xlog_space_left(xlog_t *log, int cycle, int bytes);
STATIC int       xlog_sync(xlog_t *log, xlog_in_core_t *iclog);
STATIC void      xlog_unalloc_log(xlog_t *log);
STATIC int       xlog_write(xfs_mount_t *mp, xfs_log_iovec_t region[],
                            int nentries, xfs_log_ticket_t tic,
                            xfs_lsn_t *start_lsn,
                            xlog_in_core_t **commit_iclog,
                            uint flags);
/* local state machine functions */
STATIC void xlog_state_done_syncing(xlog_in_core_t *iclog, int);
STATIC void xlog_state_do_callback(xlog_t *log, int aborted,
                                   xlog_in_core_t *iclog);
STATIC int  xlog_state_get_iclog_space(xlog_t           *log,
                                       int              len,
                                       xlog_in_core_t   **iclog,
                                       xlog_ticket_t    *ticket,
                                       int              *continued_write,
                                       int              *logoffsetp);
STATIC void xlog_state_put_ticket(xlog_t        *log,
                                  xlog_ticket_t *tic);
STATIC int  xlog_state_release_iclog(xlog_t             *log,
                                     xlog_in_core_t     *iclog);
STATIC void xlog_state_switch_iclogs(xlog_t             *log,
                                     xlog_in_core_t     *iclog,
                                     int                eventual_size);
STATIC int  xlog_state_sync(xlog_t *log, xfs_lsn_t lsn, uint flags);
STATIC int  xlog_state_sync_all(xlog_t *log, uint flags);
STATIC void xlog_state_want_sync(xlog_t *log, xlog_in_core_t *iclog);
/* local functions to manipulate grant head */
STATIC int  xlog_grant_log_space(xlog_t         *log,
                                 xlog_ticket_t  *xtic);
STATIC void xlog_grant_push_ail(xfs_mount_t     *mp,
                                int             need_bytes);
STATIC void xlog_regrant_reserve_log_space(xlog_t        *log,
                                           xlog_ticket_t *ticket);
STATIC int  xlog_regrant_write_log_space(xlog_t         *log,
                                         xlog_ticket_t  *ticket);
STATIC void xlog_ungrant_log_space(xlog_t        *log,
                                   xlog_ticket_t *ticket);
/* local ticket functions */
STATIC void             xlog_state_ticket_alloc(xlog_t *log);
STATIC xlog_ticket_t    *xlog_ticket_get(xlog_t *log,
                                         int    unit_bytes,
                                         int    count,
                                         char   clientid,
                                         uint   flags);
STATIC void             xlog_ticket_put(xlog_t *log, xlog_ticket_t *ticket);
/* local debug functions */
#if defined(DEBUG) && !defined(XLOG_NOLOG)
STATIC void     xlog_verify_dest_ptr(xlog_t *log, __psint_t ptr);
STATIC void     xlog_verify_disk_cycle_no(xlog_t *log, xlog_in_core_t *iclog);
STATIC void     xlog_verify_grant_head(xlog_t *log, int equals);
STATIC void     xlog_verify_iclog(xlog_t *log, xlog_in_core_t *iclog,
                                  int count, boolean_t syncing);
STATIC void     xlog_verify_tail_lsn(xlog_t *log, xlog_in_core_t *iclog,
                                     xfs_lsn_t tail_lsn);
#else
#define xlog_verify_dest_ptr(a,b)
#define xlog_verify_disk_cycle_no(a,b)
#define xlog_verify_grant_head(a,b)
#define xlog_verify_iclog(a,b,c,d)
#define xlog_verify_tail_lsn(a,b,c)
#endif

int     xlog_iclogs_empty(xlog_t *log);
#ifdef DEBUG
int xlog_do_error = 0;
int xlog_req_num  = 0;
int xlog_error_mod = 33;
#endif
#define XLOG_FORCED_SHUTDOWN(log)       (log->l_flags & XLOG_IO_ERROR)
/*
 * 0 => disable log manager
 * 1 => enable log manager
 * 2 => enable log manager and log debugging
 */
#if defined(XLOG_NOLOG) || defined(DEBUG)
int   xlog_debug = 1;
dev_t xlog_devt  = 0;
#endif
#if defined(XFS_LOG_TRACE)

void
xlog_trace_loggrant(xlog_t *log, xlog_ticket_t *tic, xfs_caddr_t string)
{
        if (! log->l_grant_trace)
                log->l_grant_trace = ktrace_alloc(1024, KM_SLEEP);

        ktrace_enter(log->l_grant_trace,
                     (void *)tic,
                     (void *)log->l_reserve_headq,
                     (void *)log->l_write_headq,
                     (void *)((unsigned long)log->l_grant_reserve_cycle),
                     (void *)((unsigned long)log->l_grant_reserve_bytes),
                     (void *)((unsigned long)log->l_grant_write_cycle),
                     (void *)((unsigned long)log->l_grant_write_bytes),
                     (void *)((unsigned long)log->l_curr_cycle),
                     (void *)((unsigned long)log->l_curr_block),
                     (void *)((unsigned long)CYCLE_LSN(log->l_tail_lsn, ARCH_NOCONVERT)),
                     (void *)((unsigned long)BLOCK_LSN(log->l_tail_lsn, ARCH_NOCONVERT)),
                     (void *)string,
                     (void *)((unsigned long)13),
                     (void *)((unsigned long)14),
                     (void *)((unsigned long)15),
                     (void *)((unsigned long)16));
}

void
xlog_trace_tic(xlog_t *log, xlog_ticket_t *tic)
{
        if (! log->l_trace)
                log->l_trace = ktrace_alloc(256, KM_SLEEP);

        ktrace_enter(log->l_trace,
                     (void *)tic,
                     (void *)((unsigned long)tic->t_curr_res),
                     (void *)((unsigned long)tic->t_unit_res),
                     (void *)((unsigned long)tic->t_ocnt),
                     (void *)((unsigned long)tic->t_cnt),
                     (void *)((unsigned long)tic->t_flags),
                     (void *)((unsigned long)7),
                     (void *)((unsigned long)8),
                     (void *)((unsigned long)9),
                     (void *)((unsigned long)10),
                     (void *)((unsigned long)11),
                     (void *)((unsigned long)12),
                     (void *)((unsigned long)13),
                     (void *)((unsigned long)14),
                     (void *)((unsigned long)15),
                     (void *)((unsigned long)16));
}

void
xlog_trace_iclog(xlog_in_core_t *iclog, uint state)
{
        pid_t pid;

        pid = current_pid();

        if (!iclog->ic_trace)
                iclog->ic_trace = ktrace_alloc(256, KM_SLEEP);

        ktrace_enter(iclog->ic_trace,
                     (void *)((unsigned long)state),
                     (void *)((unsigned long)pid),
                     (void *)0,
                     (void *)0,
                     (void *)0,
                     (void *)0,
                     (void *)0,
                     (void *)0,
                     (void *)0,
                     (void *)0,
                     (void *)0,
                     (void *)0,
                     (void *)0,
                     (void *)0,
                     (void *)0,
                     (void *)0);
}
#else
#define xlog_trace_loggrant(log,tic,string)
#define xlog_trace_iclog(iclog,state)
#endif /* XFS_LOG_TRACE */
/*
 * NOTES:
 *
 *      1. currblock field gets updated at startup and after in-core logs
 *         are marked WANT_SYNC.
 */
/*
 * This routine is called when a user of a log manager ticket is done with
 * the reservation.  If the ticket was ever used, then a commit record for
 * the associated transaction is written out as a log operation header with
 * no data.  The flag XLOG_TIC_INITED is set when the first write occurs with
 * a given ticket.  If the ticket was one with a permanent reservation, then
 * a few operations are done differently.  Permanent reservation tickets by
 * default don't release the reservation.  They just commit the current
 * transaction with the belief that the reservation is still needed.  A flag
 * must be passed in before permanent reservations are actually released.
 * When tickets of this type are not released, they need to be set into
 * the inited state again.  By doing this, a start record will be written
 * out when the next write occurs.
 */
xfs_lsn_t
xfs_log_done(xfs_mount_t        *mp,
             xfs_log_ticket_t   xtic,
             void               *iclog,
             uint               flags)
{
        xlog_t          *log    = mp->m_log;
        xlog_ticket_t   *ticket = (xfs_log_ticket_t) xtic;
        xfs_lsn_t       lsn     = 0;

#if defined(DEBUG) || defined(XLOG_NOLOG)
        if (! xlog_debug && xlog_devt == log->l_dev)
                return 0;
#endif

        if (XLOG_FORCED_SHUTDOWN(log) ||
            /*
             * If nothing was ever written, don't write out commit record.
             * If we get an error, just continue and give back the log ticket.
             */
            (((ticket->t_flags & XLOG_TIC_INITED) == 0) &&
             (xlog_commit_record(mp, ticket,
                                 (xlog_in_core_t **)iclog, &lsn)))) {
                lsn = (xfs_lsn_t) -1;
                if (ticket->t_flags & XLOG_TIC_PERM_RESERV) {
                        flags |= XFS_LOG_REL_PERM_RESERV;
                }
        }

        if ((ticket->t_flags & XLOG_TIC_PERM_RESERV) == 0 ||
            (flags & XFS_LOG_REL_PERM_RESERV)) {
                /*
                 * Release ticket if not permanent reservation or a specific
                 * request has been made to release a permanent reservation.
                 */
                xlog_ungrant_log_space(log, ticket);
                xlog_state_put_ticket(log, ticket);
        } else {
                xlog_regrant_reserve_log_space(log, ticket);
        }

        /* If this ticket was a permanent reservation and we aren't
         * trying to release it, reset the inited flags; so next time
         * we write, a start record will be written out.
         */
        if ((ticket->t_flags & XLOG_TIC_PERM_RESERV) &&
            (flags & XFS_LOG_REL_PERM_RESERV) == 0)
                ticket->t_flags |= XLOG_TIC_INITED;

        return lsn;
}       /* xfs_log_done */
/*
 * Force the in-core log to disk.  If flags == XFS_LOG_SYNC,
 *      the force is done synchronously.
 *
 * Asynchronous forces are implemented by setting the WANT_SYNC
 * bit in the appropriate in-core log and then returning.
 *
 * Synchronous forces are implemented with a semaphore.  All callers
 * to force a given lsn to disk will wait on a semaphore attached to the
 * specific in-core log.  When the given in-core log finally completes its
 * write to disk, that thread will wake up all threads waiting on the
 * semaphore.
 */
int
xfs_log_force(xfs_mount_t *mp,
              xfs_lsn_t   lsn,
              uint        flags)
{
        int     rval;
        xlog_t  *log = mp->m_log;

#if defined(DEBUG) || defined(XLOG_NOLOG)
        if (! xlog_debug && xlog_devt == log->l_dev)
                return 0;
#endif
        ASSERT(flags & XFS_LOG_FORCE);

        XFS_STATS_INC(xfsstats.xs_log_force);

        if ((log->l_flags & XLOG_IO_ERROR) == 0) {
                if (lsn == 0)
                        rval = xlog_state_sync_all(log, flags);
                else
                        rval = xlog_state_sync(log, lsn, flags);
        } else {
                rval = XFS_ERROR(EIO);
        }

        return rval;
}       /* xfs_log_force */
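/*
 * Illustrative usage (sketch, not original source text): the unmount
 * path later in this file forces everything out synchronously with
 *
 *      xfs_log_force(mp, 0, XFS_LOG_FORCE | XFS_LOG_SYNC);
 *
 * An lsn of 0 means "sync all" via xlog_state_sync_all(); a non-zero
 * lsn forces only up to the iclog containing that lsn.
 */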
/*
 * This function will take a log sequence number and check to see if that
 * lsn has been flushed to disk.  If it has, then the callback function is
 * called with the callback argument.  If the relevant in-core log has not
 * been synced to disk, we add the callback to the callback list of the
 * in-core log.
 */
int
xfs_log_notify(xfs_mount_t        *mp,          /* mount of partition */
               void               *iclog_hndl,  /* iclog to hang callback off */
               xfs_log_callback_t *cb)
{
        xlog_t          *log = mp->m_log;
        xlog_in_core_t  *iclog = (xlog_in_core_t *)iclog_hndl;
        int             abortflg, spl;

#if defined(DEBUG) || defined(XLOG_NOLOG)
        if (! xlog_debug && xlog_devt == log->l_dev)
                return 0;
#endif
        cb->cb_next = 0;
        spl = LOG_LOCK(log);
        abortflg = (iclog->ic_state & XLOG_STATE_IOERROR);
        if (!abortflg) {
                ASSERT_ALWAYS((iclog->ic_state == XLOG_STATE_ACTIVE) ||
                              (iclog->ic_state == XLOG_STATE_WANT_SYNC));
                *(iclog->ic_callback_tail) = cb;
                iclog->ic_callback_tail = &(cb->cb_next);
        }
        LOG_UNLOCK(log, spl);
        if (abortflg) {
                cb->cb_func(cb->cb_arg, abortflg);
        }
        return 0;
}       /* xfs_log_notify */
int
xfs_log_release_iclog(xfs_mount_t *mp,
                      void        *iclog_hndl)
{
        xlog_t          *log = mp->m_log;
        xlog_in_core_t  *iclog = (xlog_in_core_t *)iclog_hndl;

        if (xlog_state_release_iclog(log, iclog)) {
                xfs_force_shutdown(mp, XFS_LOG_IO_ERROR);
                return(EIO);
        }

        return 0;
}       /* xfs_log_release_iclog */
/*
 * 1. Reserve an amount of on-disk log space and return a ticket corresponding
 *      to the reservation.
 * 2. Potentially, push buffers at tail of log to disk.
 *
 * Each reservation is going to reserve extra space for a log record header.
 * When writes happen to the on-disk log, we don't subtract the length of the
 * log record header from any reservation.  By wasting space in each
 * reservation, we prevent over-allocation problems.
 */
int
xfs_log_reserve(xfs_mount_t      *mp,
                int              unit_bytes,
                int              cnt,
                xfs_log_ticket_t *ticket,
                __uint8_t        client,
                uint             flags)
{
        xlog_t          *log = mp->m_log;
        xlog_ticket_t   *internal_ticket;
        int             retval = 0;

#if defined(DEBUG) || defined(XLOG_NOLOG)
        if (! xlog_debug && xlog_devt == log->l_dev)
                return 0;
#endif

        ASSERT(client == XFS_TRANSACTION || client == XFS_LOG);
        ASSERT((flags & XFS_LOG_NOSLEEP) == 0);

        if (XLOG_FORCED_SHUTDOWN(log))
                return XFS_ERROR(EIO);

        XFS_STATS_INC(xfsstats.xs_try_logspace);

        if (*ticket != NULL) {
                ASSERT(flags & XFS_LOG_PERM_RESERV);
                internal_ticket = (xlog_ticket_t *)*ticket;
                xlog_grant_push_ail(mp, internal_ticket->t_unit_res);
                retval = xlog_regrant_write_log_space(log, internal_ticket);
        } else {
                /* may sleep if need to allocate more tickets */
                internal_ticket = xlog_ticket_get(log, unit_bytes, cnt,
                                                  client, flags);
                *ticket = internal_ticket;
                xlog_grant_push_ail(mp,
                                    (internal_ticket->t_unit_res *
                                     internal_ticket->t_cnt));
                retval = xlog_grant_log_space(log, internal_ticket);
        }

        return retval;
}       /* xfs_log_reserve */
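/*
 * Illustrative usage (sketch, not original source text): the unmount
 * record written later in this file takes a one-shot, non-permanent
 * reservation for the XFS_LOG client:
 *
 *      xfs_log_ticket_t tic = 0;
 *      error = xfs_log_reserve(mp, 600, 1, &tic, XFS_LOG, 0);
 *
 * Calling with a non-NULL *ticket instead requires XFS_LOG_PERM_RESERV
 * and regrants write space against the existing ticket.
 */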
/*
 * Mount a log filesystem
 *
 * mp           - ubiquitous xfs mount point structure
 * log_dev      - device number of on-disk log device
 * blk_offset   - Start block # where block size is 512 bytes (BBSIZE)
 * num_bblocks  - Number of BBSIZE blocks in on-disk log
 *
 * Return error or zero.
 */
int
xfs_log_mount(xfs_mount_t       *mp,
              dev_t             log_dev,
              xfs_daddr_t       blk_offset,
              int               num_bblks)
{
        if (!(mp->m_flags & XFS_MOUNT_NORECOVERY))
                cmn_err(CE_NOTE, "XFS mounting filesystem %s", mp->m_fsname);
        else {
                cmn_err(CE_NOTE,
                        "!Mounting filesystem \"%s\" in no-recovery mode.  Filesystem will be inconsistent.",
                        mp->m_fsname);
                ASSERT(XFS_MTOVFS(mp)->vfs_flag & VFS_RDONLY);
        }

        mp->m_log = xlog_alloc_log(mp, log_dev, blk_offset, num_bblks);

#if defined(DEBUG) || defined(XLOG_NOLOG)
        if (! xlog_debug) {
                cmn_err(CE_NOTE, "log dev: 0x%x", log_dev);
                return 0;
        }
#endif
        /*
         * skip log recovery on a norecovery mount.  pretend it all
         * just worked.
         */
        if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) {
                int     error;
                vfs_t   *vfsp = XFS_MTOVFS(mp);
                int     readonly = (vfsp->vfs_flag & VFS_RDONLY);

                if (readonly)
                        vfsp->vfs_flag &= ~VFS_RDONLY;

                error = xlog_recover(mp->m_log, readonly);

                if (readonly)
                        vfsp->vfs_flag |= VFS_RDONLY;
                if (error) {
                        cmn_err(CE_WARN, "XFS: log mount/recovery failed");
                        xlog_unalloc_log(mp->m_log);
                        return error;
                }
        }

        /* Normal transactions can now occur */
        mp->m_log->l_flags &= ~XLOG_ACTIVE_RECOVERY;

        /* End mounting message in xfs_log_mount_finish */
        return 0;
}       /* xfs_log_mount */
/*
 * Finish the recovery of the file system.  This is separate from
 * the xfs_log_mount() call, because it depends on the code in
 * xfs_mountfs() to read in the root and real-time bitmap inodes
 * between calling xfs_log_mount() and here.
 *
 * mp           - ubiquitous xfs mount point structure
 */
int
xfs_log_mount_finish(xfs_mount_t *mp, int mfsi_flags)
{
        int     error;

        if (!(mp->m_flags & XFS_MOUNT_NORECOVERY))
                error = xlog_recover_finish(mp->m_log, mfsi_flags);
        else {
                error = 0;
                ASSERT(XFS_MTOVFS(mp)->vfs_flag & VFS_RDONLY);
        }

        return error;
}       /* xfs_log_mount_finish */
/*
 * Unmount processing for the log.
 */
int
xfs_log_unmount(xfs_mount_t *mp)
{
        int     error;

        error = xfs_log_unmount_write(mp);
        xfs_log_unmount_dealloc(mp);
        return (error);
}       /* xfs_log_unmount */
/*
 * Final log writes as part of unmount.
 *
 * Mark the filesystem clean as unmount happens.  Note that during relocation
 * this routine needs to be executed as part of source-bag while the
 * deallocation must not be done until source-end.
 */

/*
 * Unmount record used to have a string "Unmount filesystem--" in the
 * data section where the "Un" was really a magic number (XLOG_UNMOUNT_TYPE).
 * We just write the magic number now since that particular field isn't
 * currently architecture converted and "nUmount" is a bit foo.
 * As far as I know, there weren't any dependencies on the old behaviour.
 */
int
xfs_log_unmount_write(xfs_mount_t *mp)
{
        xlog_t           *log = mp->m_log;
        xlog_in_core_t   *iclog;
#ifdef DEBUG
        xlog_in_core_t   *first_iclog;
#endif
        xfs_log_iovec_t  reg[1];
        xfs_log_ticket_t tic = 0;
        xfs_lsn_t        lsn;
        int              error;
        SPLDECL(s);

        /* the data section must be 32 bit size aligned */
        struct {
            __uint16_t magic;
            __uint16_t pad1;
            __uint32_t pad2; /* may as well make it 64 bits */
        } magic = { XLOG_UNMOUNT_TYPE, 0, 0 };

#if defined(DEBUG) || defined(XLOG_NOLOG)
        if (! xlog_debug && xlog_devt == log->l_dev)
                return 0;
#endif

        /*
         * Don't write out unmount record on read-only mounts.
         * Or, if we are doing a forced umount (typically because of IO errors).
         */
        if (XFS_MTOVFS(mp)->vfs_flag & VFS_RDONLY)
                return 0;

        xfs_log_force(mp, 0, XFS_LOG_FORCE|XFS_LOG_SYNC);

#ifdef DEBUG
        first_iclog = iclog = log->l_iclog;
        do {
                if (!(iclog->ic_state & XLOG_STATE_IOERROR)) {
                        ASSERT(iclog->ic_state & XLOG_STATE_ACTIVE);
                        ASSERT(iclog->ic_offset == 0);
                }
                iclog = iclog->ic_next;
        } while (iclog != first_iclog);
#endif
        if (! (XLOG_FORCED_SHUTDOWN(log))) {
                reg[0].i_addr = (void*)&magic;
                reg[0].i_len  = sizeof(magic);

                error = xfs_log_reserve(mp, 600, 1, &tic, XFS_LOG, 0);
                if (!error) {
                        /* remove inited flag */
                        ((xlog_ticket_t *)tic)->t_flags = 0;
                        error = xlog_write(mp, reg, 1, tic, &lsn,
                                           NULL, XLOG_UNMOUNT_TRANS);
                        /*
                         * At this point, we're umounting anyway,
                         * so there's no point in transitioning log state
                         * to IOERROR. Just continue...
                         */
                }

                if (error) {
                        xfs_fs_cmn_err(CE_ALERT, mp,
                                "xfs_log_unmount: unmount record failed");
                }

                s = LOG_LOCK(log);
                iclog = log->l_iclog;
                iclog->ic_refcnt++;
                LOG_UNLOCK(log, s);
                xlog_state_want_sync(log, iclog);
                (void) xlog_state_release_iclog(log, iclog);

                s = LOG_LOCK(log);
                if (!(iclog->ic_state == XLOG_STATE_ACTIVE ||
                      iclog->ic_state == XLOG_STATE_DIRTY)) {
                        if (!XLOG_FORCED_SHUTDOWN(log)) {
                                sv_wait(&iclog->ic_forcesema, PMEM,
                                        &log->l_icloglock, s);
                        } else {
                                LOG_UNLOCK(log, s);
                        }
                } else {
                        LOG_UNLOCK(log, s);
                }
                if (tic)
                        xlog_state_put_ticket(log, tic);
        } else {
                /*
                 * We're already in forced_shutdown mode, couldn't
                 * even attempt to write out the unmount transaction.
                 *
                 * Go through the motions of sync'ing and releasing
                 * the iclog, even though no I/O will actually happen,
                 * we need to wait for other log I/O's that may already
                 * be in progress.  Do this as a separate section of
                 * code so we'll know if we ever get stuck here that
                 * we're in this odd situation of trying to unmount
                 * a file system that went into forced_shutdown as
                 * the result of an unmount..
                 */
                s = LOG_LOCK(log);
                iclog = log->l_iclog;
                iclog->ic_refcnt++;
                LOG_UNLOCK(log, s);

                xlog_state_want_sync(log, iclog);
                (void) xlog_state_release_iclog(log, iclog);

                s = LOG_LOCK(log);

                if ( ! (   iclog->ic_state == XLOG_STATE_ACTIVE
                        || iclog->ic_state == XLOG_STATE_DIRTY
                        || iclog->ic_state == XLOG_STATE_IOERROR) ) {

                        sv_wait(&iclog->ic_forcesema, PMEM,
                                &log->l_icloglock, s);
                } else {
                        LOG_UNLOCK(log, s);
                }
        }

        return 0;
}       /* xfs_log_unmount_write */
/*
 * Deallocate log structures for unmount/relocation.
 */
void
xfs_log_unmount_dealloc(xfs_mount_t *mp)
{
        xlog_unalloc_log(mp->m_log);
}       /* xfs_log_unmount_dealloc */
/*
 * Write region vectors to log.  The write happens using the space reservation
 * of the ticket (tic).  It is not a requirement that all writes for a given
 * transaction occur with one call to xfs_log_write().
 */
int
xfs_log_write(xfs_mount_t *     mp,
              xfs_log_iovec_t   reg[],
              int               nentries,
              xfs_log_ticket_t  tic,
              xfs_lsn_t         *start_lsn)
{
        int     error;
        xlog_t  *log = mp->m_log;

#if defined(DEBUG) || defined(XLOG_NOLOG)
        if (! xlog_debug && xlog_devt == log->l_dev) {
                *start_lsn = 0;
                return 0;
        }
#endif
        if (XLOG_FORCED_SHUTDOWN(log))
                return XFS_ERROR(EIO);

        if ((error = xlog_write(mp, reg, nentries, tic, start_lsn, NULL, 0))) {
                xfs_force_shutdown(mp, XFS_LOG_IO_ERROR);
        }
        return (error);
}       /* xfs_log_write */
void
xfs_log_move_tail(xfs_mount_t   *mp,
                  xfs_lsn_t     tail_lsn)
{
        xlog_ticket_t   *tic;
        xlog_t          *log = mp->m_log;
        int             need_bytes, free_bytes, cycle, bytes;
        SPLDECL(s);

#if defined(DEBUG) || defined(XLOG_NOLOG)
        if (!xlog_debug && xlog_devt == log->l_dev)
                return;
#endif
        if (XLOG_FORCED_SHUTDOWN(log))
                return;
        ASSERT(!XFS_FORCED_SHUTDOWN(mp));

        if (tail_lsn == 0) {
                /* needed since sync_lsn is 64 bits */
                s = LOG_LOCK(log);
                tail_lsn = log->l_last_sync_lsn;
                LOG_UNLOCK(log, s);
        }

        s = GRANT_LOCK(log);

        /* Also an illegal lsn.  1 implies that we aren't passing in a legal
         * tail_lsn.
         */
        if (tail_lsn != 1)
                log->l_tail_lsn = tail_lsn;

        if ((tic = log->l_write_headq)) {
#ifdef DEBUG
                if (log->l_flags & XLOG_ACTIVE_RECOVERY)
                        panic("Recovery problem");
#endif
                cycle = log->l_grant_write_cycle;
                bytes = log->l_grant_write_bytes;
                free_bytes = xlog_space_left(log, cycle, bytes);
                do {
                        ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV);

                        if (free_bytes < tic->t_unit_res && tail_lsn != 1)
                                break;
                        tail_lsn = 0;
                        free_bytes -= tic->t_unit_res;
                        sv_signal(&tic->t_sema);
                        tic = tic->t_next;
                } while (tic != log->l_write_headq);
        }
        if ((tic = log->l_reserve_headq)) {
#ifdef DEBUG
                if (log->l_flags & XLOG_ACTIVE_RECOVERY)
                        panic("Recovery problem");
#endif
                cycle = log->l_grant_reserve_cycle;
                bytes = log->l_grant_reserve_bytes;
                free_bytes = xlog_space_left(log, cycle, bytes);
                do {
                        if (tic->t_flags & XLOG_TIC_PERM_RESERV)
                                need_bytes = tic->t_unit_res*tic->t_cnt;
                        else
                                need_bytes = tic->t_unit_res;
                        if (free_bytes < need_bytes && tail_lsn != 1)
                                break;
                        tail_lsn = 0;
                        free_bytes -= need_bytes;
                        sv_signal(&tic->t_sema);
                        tic = tic->t_next;
                } while (tic != log->l_reserve_headq);
        }
        GRANT_UNLOCK(log, s);
}       /* xfs_log_move_tail */
/*
 * Determine if we have a transaction that has gone to disk
 * that needs to be covered.  Log activity needs to be idle (no AIL and
 * nothing in the iclogs).  And, we need to be in the right state indicating
 * something has gone out.
 */
int
xfs_log_need_covered(xfs_mount_t *mp)
{
        SPLDECL(s);
        int             needed = 0, gen;
        xlog_t          *log = mp->m_log;
        vfs_t           *vfsp = XFS_MTOVFS(mp);

        if (mp->m_frozen || XFS_FORCED_SHUTDOWN(mp) ||
            (vfsp->vfs_flag & VFS_RDONLY))
                return 0;

        s = LOG_LOCK(log);
        if (((log->l_covered_state == XLOG_STATE_COVER_NEED) ||
             (log->l_covered_state == XLOG_STATE_COVER_NEED2))
            && !xfs_trans_first_ail(mp, &gen)
            && xlog_iclogs_empty(log)) {
                if (log->l_covered_state == XLOG_STATE_COVER_NEED)
                        log->l_covered_state = XLOG_STATE_COVER_DONE;
                else {
                        ASSERT(log->l_covered_state == XLOG_STATE_COVER_NEED2);
                        log->l_covered_state = XLOG_STATE_COVER_DONE2;
                }
                needed = 1;
        }
        LOG_UNLOCK(log, s);
        return(needed);
}       /* xfs_log_need_covered */
/******************************************************************************
 *
 *      local routines
 *
 ******************************************************************************
 */
/* xfs_trans_tail_ail returns 0 when there is nothing in the list.
 * The log manager must keep track of the last LR which was committed
 * to disk.  The lsn of this LR will become the new tail_lsn whenever
 * xfs_trans_tail_ail returns 0.  If we don't do this, we run into
 * the situation where stuff could be written into the log but nothing
 * was ever in the AIL when asked.  Eventually, we panic since the
 * tail hits the head.
 *
 * We may be holding the log iclog lock upon entering this routine.
 */
xfs_lsn_t
xlog_assign_tail_lsn(xfs_mount_t *mp)
{
        xfs_lsn_t tail_lsn;
        SPLDECL(s);
        xlog_t    *log = mp->m_log;

        tail_lsn = xfs_trans_tail_ail(mp);
        s = GRANT_LOCK(log);
        if (tail_lsn != 0)
                log->l_tail_lsn = tail_lsn;
        else
                tail_lsn = log->l_tail_lsn = log->l_last_sync_lsn;
        GRANT_UNLOCK(log, s);

        return tail_lsn;
}       /* xlog_assign_tail_lsn */
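/*
 * Illustrative note (inferred from the CYCLE_LSN/BLOCK_LSN usage in this
 * file, not original source text): an xfs_lsn_t packs the cycle number
 * into the high 32 bits and the block number into the low 32 bits, so
 * cycle 1, block 0 is 0x100000000LL -- exactly the initial l_tail_lsn
 * value set up in xlog_alloc_log().
 */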
/*
 * Return the space in the log between the tail and the head.  The head
 * is passed in the cycle/bytes formal parms.  In the special case where
 * the reserve head has wrapped past the tail, this calculation is no
 * longer valid.  In this case, just return 0 which means there is no space
 * in the log.  This works for all places where this function is called
 * with the reserve head.  Of course, if the write head were to ever
 * wrap the tail, we should blow up.  Rather than catch this case here,
 * we depend on other ASSERTions in other parts of the code.   XXXmiken
 *
 * This code also handles the case where the reservation head is behind
 * the tail.  The details of this case are described below, but the end
 * result is that we return the size of the log as the amount of space left.
 */
int
xlog_space_left(xlog_t *log, int cycle, int bytes)
{
        int free_bytes;
        int tail_bytes;
        int tail_cycle;

        tail_bytes = BBTOB(BLOCK_LSN(log->l_tail_lsn, ARCH_NOCONVERT));
        tail_cycle = CYCLE_LSN(log->l_tail_lsn, ARCH_NOCONVERT);
        if ((tail_cycle == cycle) && (bytes >= tail_bytes)) {
                free_bytes = log->l_logsize - (bytes - tail_bytes);
        } else if ((tail_cycle + 1) < cycle) {
                return 0;
        } else if (tail_cycle < cycle) {
                ASSERT(tail_cycle == (cycle - 1));
                free_bytes = tail_bytes - bytes;
        } else {
                /*
                 * The reservation head is behind the tail.
                 * This can only happen when the AIL is empty so the tail
                 * is equal to the head and the l_roundoff value in the
                 * log structure is taking up the difference between the
                 * reservation head and the tail.  The bytes accounted for
                 * by the l_roundoff field are temporarily 'lost' to the
                 * reservation mechanism, but they are cleaned up when the
                 * log buffers that created them are reused.  These lost
                 * bytes are what allow the reservation head to fall behind
                 * the tail in the case that the log is 'empty'.
                 * In this case we just want to return the size of the
                 * log as the amount of space left.
                 */
                /* This assert does not take into account padding from striped log writes *
                ASSERT((tail_cycle == (cycle + 1)) ||
                       ((bytes + log->l_roundoff) >= tail_bytes));
                */
                free_bytes = log->l_logsize;
        }
        return free_bytes;
}       /* xlog_space_left */
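/*
 * Worked example (illustrative, not from the original source): with a
 * 1000-byte log, tail at cycle 1/byte 200 and reserve head at cycle 2/
 * byte 150, the head has wrapped once, so the space left is
 * tail_bytes - bytes = 200 - 150 = 50.  If the head were still on
 * cycle 1 at byte 700, the space left would be
 * l_logsize - (bytes - tail_bytes) = 1000 - (700 - 200) = 500.
 */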
/*
 * Log function which is called when an io completes.
 *
 * The log manager needs its own routine, in order to control what
 * happens with the buffer after the write completes.
 */
void
xlog_iodone(xfs_buf_t *bp)
{
        xlog_in_core_t  *iclog;
        xlog_t          *l;
        int             aborted;

        iclog = XFS_BUF_FSPRIVATE(bp, xlog_in_core_t *);
        ASSERT(XFS_BUF_FSPRIVATE2(bp, unsigned long) == (unsigned long) 2);
        XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)1);
        aborted = 0;

        /*
         * Race to shutdown the filesystem if we see an error.
         */
        if (XFS_BUF_GETERROR(bp)) {
                /* Some versions of cpp barf on the recursive definition of
                 * ic_log -> hic_fields.ic_log and expand ic_log twice when
                 * it is passed through two macros.  Workaround for broken cpp.
                 */
                l = iclog->ic_log;
                xfs_ioerror_alert("xlog_iodone",
                                  iclog->ic_log->l_mp, bp, XFS_BUF_ADDR(bp));
                XFS_BUF_STALE(bp);
                xfs_force_shutdown(l->l_mp, XFS_LOG_IO_ERROR);
                /*
                 * This flag will be propagated to the trans-committed
                 * callback routines to let them know that the log-commit
                 * didn't succeed.
                 */
                aborted = XFS_LI_ABORTED;
        } else if (iclog->ic_state & XLOG_STATE_IOERROR) {
                aborted = XFS_LI_ABORTED;
        }
        xlog_state_done_syncing(iclog, aborted);
        if (!(XFS_BUF_ISASYNC(bp))) {
                /*
                 * Corresponding psema() will be done in bwrite().  If we don't
                 * vsema() here, panic.
                 */
                XFS_BUF_V_IODONESEMA(bp);
        }
}       /* xlog_iodone */
/*
 * The bdstrat callback function for log bufs.  This gives us a central
 * place to trap bufs in case we get hit by a log I/O error and need to
 * shutdown.  Actually, in practice, even when we didn't get a log error,
 * we transition the iclogs to IOERROR state *after* flushing all existing
 * iclogs to disk.  This is because we don't want any more new transactions
 * to be started or completed afterwards.
 */
STATIC int
xlog_bdstrat_cb(struct xfs_buf *bp)
{
        xlog_in_core_t *iclog;

        iclog = XFS_BUF_FSPRIVATE(bp, xlog_in_core_t *);

        if ((iclog->ic_state & XLOG_STATE_IOERROR) == 0) {
                /* note for irix bstrat will need struct bdevsw passed
                 * Fix the following macro if the code ever is merged
                 */
                XFS_bdstrat(bp);
                return 0;
        }

        xfs_buftrace("XLOG__BDSTRAT IOERROR", bp);
        XFS_BUF_ERROR(bp, EIO);
        XFS_BUF_STALE(bp);
        xfs_biodone(bp);
        return (XFS_ERROR(EIO));
}       /* xlog_bdstrat_cb */
/*
 * Return size of each in-core log record buffer.
 *
 * Low memory machines only get 2 16KB buffers.  We don't want to waste
 * memory here.  However, all other machines get at least 2 32KB buffers.
 * The number is hard coded because we don't care about the minimum
 * memory size, just 32MB systems.
 *
 * If the filesystem blocksize is too large, we may need to choose a
 * larger size since the directory code currently logs entire blocks.
 * XXXmiken XXXcurtis
 */
STATIC void
xlog_get_iclog_buffer_size(xfs_mount_t  *mp,
                           xlog_t       *log)
{
        int size;
        int xhdrs;

#if defined(DEBUG) || defined(XLOG_NOLOG)
        /*
         * When logbufs == 0, someone has disabled the log from the FSTAB
         * file.  This is not a documented feature.  We need to set xlog_debug
         * to zero (this deactivates the log) and set xlog_devt to the
         * appropriate dev_t.  Only one filesystem may be affected as such
         * since this is just a performance hack to test what we might be able
         * to get if the log were not present.
         */
        if (mp->m_logbufs == 0) {
                xlog_debug = 0;
                xlog_devt = log->l_dev;
                log->l_iclog_bufs = XLOG_NUM_ICLOGS;
        } else
#endif
        {
                /*
                 * This is the normal path.  If m_logbufs == -1, then the
                 * admin has chosen to use the system defaults for logbuffers.
                 */
                if (mp->m_logbufs == -1)
                        log->l_iclog_bufs = XLOG_NUM_ICLOGS;
                else
                        log->l_iclog_bufs = mp->m_logbufs;

#if defined(DEBUG) || defined(XLOG_NOLOG)
                /* We are reactivating a filesystem after it was active */
                if (log->l_dev == xlog_devt) {
                        xlog_devt = 1;
                        xlog_debug = 1;
                }
#endif
        }

        /*
         * Buffer size passed in from mount system call.
         */
        if (mp->m_logbsize != -1) {
                size = log->l_iclog_size = mp->m_logbsize;
                log->l_iclog_size_log = 0;
                while (size != 1) {
                        log->l_iclog_size_log++;
                        size >>= 1;
                }

                if (XFS_SB_VERSION_HASLOGV2(&mp->m_sb)) {
                        /* # headers = size / 32K
                         * one header holds cycles from 32K of data
                         */

                        xhdrs = mp->m_logbsize / XLOG_HEADER_CYCLE_SIZE;
                        if (mp->m_logbsize % XLOG_HEADER_CYCLE_SIZE)
                                xhdrs++;
                        log->l_iclog_hsize = xhdrs << BBSHIFT;
                        log->l_iclog_heads = xhdrs;
                } else {
                        ASSERT(mp->m_logbsize <= XLOG_BIG_RECORD_BSIZE);
                        log->l_iclog_hsize = BBSIZE;
                        log->l_iclog_heads = 1;
                }
                return;
        }

        /*
         * Special case machines that have less than 32MB of memory.
         * All machines with more memory use 32KB buffers.
         */
        if (xfs_physmem <= btoc(32*1024*1024)) {
                /* Don't change; min configuration */
                log->l_iclog_size = XLOG_RECORD_BSIZE;          /* 16k */
                log->l_iclog_size_log = XLOG_RECORD_BSHIFT;
        } else {
                log->l_iclog_size = XLOG_BIG_RECORD_BSIZE;      /* 32k */
                log->l_iclog_size_log = XLOG_BIG_RECORD_BSHIFT;
        }

        /* the default log size is 16k or 32k which is one header sector */
        log->l_iclog_hsize = BBSIZE;
        log->l_iclog_heads = 1;

        /*
         * For 16KB, we use 3 32KB buffers.  For 32KB block sizes, we use
         * 4 32KB buffers.  For 64KB block sizes, we use 8 32KB buffers.
         */
        if (mp->m_sb.sb_blocksize >= 16*1024) {
                log->l_iclog_size = XLOG_BIG_RECORD_BSIZE;
                log->l_iclog_size_log = XLOG_BIG_RECORD_BSHIFT;
                if (mp->m_logbufs == -1) {
                        switch (mp->m_sb.sb_blocksize) {
                            case 16*1024:                       /* 16 KB */
                                log->l_iclog_bufs = 3;
                                break;
                            case 32*1024:                       /* 32 KB */
                                log->l_iclog_bufs = 4;
                                break;
                            case 64*1024:                       /* 64 KB */
                                log->l_iclog_bufs = 8;
                                break;
                            default:
                                xlog_panic("XFS: Illegal blocksize");
                                break;
                        }
                }
        }
}       /* xlog_get_iclog_buffer_size */
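/*
 * Worked example (illustrative, not from the original source): for a v2
 * log with logbsize = 128K, xhdrs = 128K / XLOG_HEADER_CYCLE_SIZE (32K)
 * = 4, so l_iclog_hsize = 4 << BBSHIFT = 2048 bytes and l_iclog_heads
 * = 4: each 32K chunk of iclog data gets its own header sector to hold
 * that chunk's saved cycle words.
 */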
/*
 * This routine initializes some of the log structure for a given mount point.
 * Its primary purpose is to fill in enough, so recovery can occur.  However,
 * some other stuff may be filled in too.
 */
STATIC xlog_t *
xlog_alloc_log(xfs_mount_t      *mp,
               dev_t            log_dev,
               xfs_daddr_t      blk_offset,
               int              num_bblks)
{
        xlog_t                  *log;
        xlog_rec_header_t       *head;
        xlog_in_core_t          **iclogp;
        xlog_in_core_t          *iclog, *prev_iclog=NULL;
        xfs_buf_t               *bp;
        int                     i;
        int                     iclogsize;

        log = (xlog_t *)kmem_zalloc(sizeof(xlog_t), KM_SLEEP);

        log->l_mp          = mp;
        log->l_dev         = log_dev;
        log->l_logsize     = BBTOB(num_bblks);
        log->l_logBBstart  = blk_offset;
        log->l_logBBsize   = num_bblks;
        log->l_covered_state = XLOG_STATE_COVER_IDLE;
        log->l_flags       |= XLOG_ACTIVE_RECOVERY;

        log->l_prev_block  = -1;
        ASSIGN_ANY_LSN(log->l_tail_lsn, 1, 0, ARCH_NOCONVERT);
        /* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */
        log->l_last_sync_lsn = log->l_tail_lsn;
        log->l_curr_cycle  = 1;     /* 0 is bad since this is initial value */
        log->l_grant_reserve_cycle = 1;
        log->l_grant_write_cycle = 1;

        if (XFS_SB_VERSION_HASLOGV2(&mp->m_sb)) {
                if (mp->m_sb.sb_logsunit <= 1) {
                        log->l_stripemask = 1;
                } else {
                        log->l_stripemask = 1 <<
                                xfs_highbit32(mp->m_sb.sb_logsunit >> BBSHIFT);
                }
        }
        if (XFS_SB_VERSION_HASSECTOR(&mp->m_sb)) {
                log->l_sectbb_log = mp->m_sb.sb_logsectlog - BBSHIFT;
                ASSERT(log->l_sectbb_log <= mp->m_sectbb_log);
                /* for larger sector sizes, must have v2 or external log */
                ASSERT(log->l_sectbb_log == 0 ||
                       log->l_logBBstart == 0 ||
                       XFS_SB_VERSION_HASLOGV2(&mp->m_sb));
                ASSERT(mp->m_sb.sb_logsectlog >= BBSHIFT);
        }
        log->l_sectbb_mask = (1 << log->l_sectbb_log) - 1;

        xlog_get_iclog_buffer_size(mp, log);

        bp = log->l_xbuf = XFS_getrbuf(0,mp);   /* get my locked buffer */ /* mp needed for pagebuf/linux only */
        XFS_BUF_SET_TARGET(bp, mp->m_logdev_targp);
        XFS_BUF_SET_SIZE(bp, log->l_iclog_size);
        XFS_BUF_SET_IODONE_FUNC(bp, xlog_iodone);
        XFS_BUF_SET_BDSTRAT_FUNC(bp, xlog_bdstrat_cb);
        XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)1);
        ASSERT(XFS_BUF_ISBUSY(log->l_xbuf));
        ASSERT(XFS_BUF_VALUSEMA(log->l_xbuf) <= 0);
        spinlock_init(&log->l_icloglock, "iclog");
        spinlock_init(&log->l_grant_lock, "grhead_iclog");
        initnsema(&log->l_flushsema, 0, "ic-flush");
        xlog_state_ticket_alloc(log);  /* wait until after icloglock inited */

        /* log record size must be multiple of BBSIZE; see xlog_rec_header_t */
        ASSERT((XFS_BUF_SIZE(bp) & BBMASK) == 0);

        iclogp = &log->l_iclog;
        /*
         * The amount of memory to allocate for the iclog structure is
         * rather funky due to the way the structure is defined.  It is
         * done this way so that we can use different sizes for machines
         * with different amounts of memory.  See the definition of
         * xlog_in_core_t in xfs_log_priv.h for details.
         */
        iclogsize = log->l_iclog_size;
        ASSERT(log->l_iclog_size >= 4096);
        for (i=0; i < log->l_iclog_bufs; i++) {
                *iclogp = (xlog_in_core_t *)
                          kmem_zalloc(sizeof(xlog_in_core_t), KM_SLEEP);
                iclog = *iclogp;
                iclog->hic_data = (xlog_in_core_2_t *)
                          kmem_alloc(iclogsize, KM_SLEEP);

                iclog->ic_prev = prev_iclog;
                prev_iclog = iclog;
                log->l_iclog_bak[i] = (xfs_caddr_t)&(iclog->ic_header);

                head = &iclog->ic_header;
                memset(head, 0, sizeof(xlog_rec_header_t));
                INT_SET(head->h_magicno, ARCH_CONVERT, XLOG_HEADER_MAGIC_NUM);
                INT_SET(head->h_version, ARCH_CONVERT,
                        XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb) ? 2 : 1);
                INT_SET(head->h_size, ARCH_CONVERT, log->l_iclog_size);
                /* new fields */
                INT_SET(head->h_fmt, ARCH_CONVERT, XLOG_FMT);
                memcpy(&head->h_fs_uuid, &mp->m_sb.sb_uuid, sizeof(uuid_t));

                bp = iclog->ic_bp = XFS_getrbuf(0,mp);  /* my locked buffer */ /* mp needed for pagebuf/linux only */
                XFS_BUF_SET_TARGET(bp, mp->m_logdev_targp);
                XFS_BUF_SET_SIZE(bp, log->l_iclog_size);
                XFS_BUF_SET_IODONE_FUNC(bp, xlog_iodone);
                XFS_BUF_SET_BDSTRAT_FUNC(bp, xlog_bdstrat_cb);
                XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)1);

                iclog->ic_size = XFS_BUF_SIZE(bp) - log->l_iclog_hsize;
                iclog->ic_state = XLOG_STATE_ACTIVE;
                iclog->ic_log = log;
                iclog->ic_callback_tail = &(iclog->ic_callback);
                iclog->ic_datap = (char *)iclog->hic_data + log->l_iclog_hsize;

                ASSERT(XFS_BUF_ISBUSY(iclog->ic_bp));
                ASSERT(XFS_BUF_VALUSEMA(iclog->ic_bp) <= 0);
                sv_init(&iclog->ic_forcesema, SV_DEFAULT, "iclog-force");
                sv_init(&iclog->ic_writesema, SV_DEFAULT, "iclog-write");

                iclogp = &iclog->ic_next;
        }
        *iclogp = log->l_iclog;                 /* complete ring */
        log->l_iclog->ic_prev = prev_iclog;     /* re-write 1st prev ptr */

        return log;
}       /* xlog_alloc_log */
/*
 * Write out the commit record of a transaction associated with the given
 * ticket.  Return the lsn of the commit record.
 */
STATIC int
xlog_commit_record(xfs_mount_t  *mp,
                   xlog_ticket_t *ticket,
                   xlog_in_core_t **iclog,
                   xfs_lsn_t    *commitlsnp)
{
        int             error;
        xfs_log_iovec_t reg[1];

        reg[0].i_addr = 0;
        reg[0].i_len = 0;

        ASSERT_ALWAYS(iclog);
        if ((error = xlog_write(mp, reg, 1, ticket, commitlsnp,
                               iclog, XLOG_COMMIT_TRANS))) {
                xfs_force_shutdown(mp, XFS_LOG_IO_ERROR);
        }
        return (error);
}       /* xlog_commit_record */
/*
 * Push on the buffer cache code if we ever use more than 75% of the on-disk
 * log space.  This code pushes on the lsn which would supposedly free up
 * the 25% which we want to leave free.  We may need to adopt a policy which
 * pushes on an lsn which is further along in the log once we reach the high
 * water mark.  In this manner, we would be creating a low water mark.
 */
STATIC void
xlog_grant_push_ail(xfs_mount_t *mp,
                    int         need_bytes)
{
        xlog_t          *log = mp->m_log;       /* pointer to the log */
        xfs_lsn_t       tail_lsn;               /* lsn of the log tail */
        xfs_lsn_t       threshold_lsn = 0;      /* lsn we'd like to be at */
        int             free_blocks;            /* free blocks left to write to */
        int             free_bytes;             /* free bytes left to write to */
        int             threshold_block;        /* block in lsn we'd like to be at */
        int             threshold_cycle;        /* lsn cycle we'd like to be at */
        int             free_threshold;
        SPLDECL(s);

        ASSERT(BTOBB(need_bytes) < log->l_logBBsize);

        s = GRANT_LOCK(log);
        free_bytes = xlog_space_left(log,
                                     log->l_grant_reserve_cycle,
                                     log->l_grant_reserve_bytes);
        tail_lsn = log->l_tail_lsn;
        free_blocks = BTOBBT(free_bytes);

        /*
         * Set the threshold for the minimum number of free blocks in the
         * log to the maximum of what the caller needs, one quarter of the
         * log, and 256 blocks.
         */
        free_threshold = BTOBB(need_bytes);
        free_threshold = MAX(free_threshold, (log->l_logBBsize >> 2));
        free_threshold = MAX(free_threshold, 256);
        if (free_blocks < free_threshold) {
                threshold_block = BLOCK_LSN(tail_lsn, ARCH_NOCONVERT) + free_threshold;
                threshold_cycle = CYCLE_LSN(tail_lsn, ARCH_NOCONVERT);
                if (threshold_block >= log->l_logBBsize) {
                        threshold_block -= log->l_logBBsize;
                        threshold_cycle += 1;
                }
                ASSIGN_ANY_LSN(threshold_lsn, threshold_cycle,
                               threshold_block, ARCH_NOCONVERT);

                /* Don't pass in an lsn greater than the lsn of the last
                 * log record known to be on disk.
                 */
                if (XFS_LSN_CMP_ARCH(threshold_lsn, log->l_last_sync_lsn, ARCH_NOCONVERT) > 0)
                        threshold_lsn = log->l_last_sync_lsn;
        }
        GRANT_UNLOCK(log, s);

        /*
         * Get the transaction layer to kick the dirty buffers out to
         * disk asynchronously. No point in trying to do this if
         * the filesystem is shutting down.
         */
        if (threshold_lsn &&
            !XLOG_FORCED_SHUTDOWN(log))
                xfs_trans_push_ail(mp, threshold_lsn);
}       /* xlog_grant_push_ail */
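/*
 * Worked example (illustrative, not from the original source): for a
 * 2000-block log with the tail at cycle 3, block 1500, and a caller
 * needing 64 blocks, free_threshold = MAX(64, 2000 >> 2, 256) = 500
 * blocks.  The target block 1500 + 500 = 2000 wraps past the end of
 * the log, giving a threshold lsn of cycle 4, block 0, which is then
 * clamped to l_last_sync_lsn if it points past the last record known
 * to be on disk.
 */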
/*
 * Flush out the in-core log (iclog) to the on-disk log in a synchronous or
 * asynchronous fashion.  Previously, we should have moved the current iclog
 * ptr in the log to point to the next available iclog.  This allows further
 * write to continue while this code syncs out an iclog ready to go.
 * Before an in-core log can be written out, the data section must be scanned
 * to save away the 1st word of each BBSIZE block into the header.  We replace
 * it with the current cycle count.  Each BBSIZE block is tagged with the
 * cycle count because there is an implicit assumption that drives will
 * guarantee that entire 512 byte blocks get written at once.  In other words,
 * we can't have part of a 512 byte block written and part not written.  By
 * tagging each block, we will know which blocks are valid when recovering
 * after an unclean shutdown.
 *
 * This routine is single threaded on the iclog.  No other thread can be in
 * this routine with the same iclog.  Changing contents of iclog can there-
 * fore be done without grabbing the state machine lock.  Updating the global
 * log will require grabbing the lock though.
 *
 * The entire log manager uses a logical block numbering scheme.  Only
 * log_sync (and then only bwrite()) know about the fact that the log may
 * not start with block zero on a given device.  The log block start offset
 * is added immediately before calling bwrite().
 */
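/*
 * Illustrative sketch of the cycle stamping described above (not from
 * the original source): if the current cycle is 7, the first word of
 * each 512-byte block in the data section is saved into the header's
 * h_cycle_data[] and overwritten with 7.  During recovery, any block
 * whose first word is not 7 was never completely written this cycle;
 * for the blocks that were, the saved word is restored before replay.
 */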
STATIC int
xlog_sync(xlog_t                *log,
          xlog_in_core_t        *iclog)
{
        xfs_caddr_t     dptr;           /* pointer to byte sized element */
        xfs_buf_t       *bp;
        int             i, ops;
        uint            count;          /* byte count of bwrite */
        int             split = 0;      /* split write into two regions */
        int             error;

        XFS_STATS_INC(xfsstats.xs_log_writes);
        ASSERT(iclog->ic_refcnt == 0);

        /* Round out the log write size */
        if (iclog->ic_offset & BBMASK) {
                /* count of 0 is already accounted for up in
                 * xlog_state_sync_all().  Once in this routine,
                 * operations on the iclog are single threaded.
                 *
                 * Difference between rounded up size and size
                 */
                count = iclog->ic_offset & BBMASK;
                iclog->ic_roundoff += BBSIZE - count;
        }
        if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) {
                unsigned sunit = BTOBB(log->l_mp->m_sb.sb_logsunit);
                int roundup;

                if (!sunit)
                        sunit = 1;

                count = BTOBB(log->l_iclog_hsize + iclog->ic_offset);
                if (count & (sunit - 1)) {
                        roundup = sunit - (count & (sunit - 1));
                } else {
                        roundup = 0;
                }
                iclog->ic_offset += BBTOB(roundup);
        }

        log->l_roundoff += iclog->ic_roundoff;

        xlog_pack_data(log, iclog);       /* put cycle number in every block */

        /* real byte length */
        INT_SET(iclog->ic_header.h_len, ARCH_CONVERT, iclog->ic_offset);
        /* put ops count in correct order */
        ops = iclog->ic_header.h_num_logops;
        INT_SET(iclog->ic_header.h_num_logops, ARCH_CONVERT, ops);

        bp = iclog->ic_bp;
        ASSERT(XFS_BUF_FSPRIVATE2(bp, unsigned long) == (unsigned long)1);
        XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)2);
        XFS_BUF_SET_ADDR(bp, BLOCK_LSN(iclog->ic_header.h_lsn, ARCH_CONVERT));

        /* Count is already rounded up to a BBSIZE above */
        count = iclog->ic_offset + iclog->ic_roundoff;
        ASSERT((count & BBMASK) == 0);

        /* Add for LR header */
        count += log->l_iclog_hsize;
        XFS_STATS_ADD(xfsstats.xs_log_blocks, BTOBB(count));

        /* Do we need to split this write into 2 parts? */
        if (XFS_BUF_ADDR(bp) + BTOBB(count) > log->l_logBBsize) {
                split = count - (BBTOB(log->l_logBBsize - XFS_BUF_ADDR(bp)));
                count = BBTOB(log->l_logBBsize - XFS_BUF_ADDR(bp));
                iclog->ic_bwritecnt = 2;        /* split into 2 writes */
        } else {
                iclog->ic_bwritecnt = 1;
        }
        XFS_BUF_SET_PTR(bp, (xfs_caddr_t) &(iclog->ic_header), count);
        XFS_BUF_SET_FSPRIVATE(bp, iclog);       /* save for later */
        XFS_BUF_BUSY(bp);
        XFS_BUF_ASYNC(bp);
        /*
         * Do a disk write cache flush for the log block.
         * This is a bit of a sledgehammer, it would be better
         * to use a tag barrier here that just prevents reordering.
         * It may not be needed to flush the first split block in the log wrap
         * case, but do it anyways to be safe -AK
         */
        if (!(log->l_mp->m_flags & XFS_MOUNT_NOLOGFLUSH))
                XFS_BUF_FLUSH(bp);

        ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1);
        ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize);

        xlog_verify_iclog(log, iclog, count, B_TRUE);

        /* account for log which doesn't start at block #0 */
        XFS_BUF_SET_ADDR(bp, XFS_BUF_ADDR(bp) + log->l_logBBstart);
        /*
         * Don't call xfs_bwrite here. We do log-syncs even when the filesystem
         * is shutting down.
         */
        if ((error = XFS_bwrite(bp))) {
                xfs_ioerror_alert("xlog_sync", log->l_mp, bp,
                                  XFS_BUF_ADDR(bp));
                return (error);
        }
        if (split) {
                bp = iclog->ic_log->l_xbuf;
                ASSERT(XFS_BUF_FSPRIVATE2(bp, unsigned long) ==
                                                        (unsigned long)1);
                XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)2);
                XFS_BUF_SET_ADDR(bp, 0);             /* logical 0 */
                XFS_BUF_SET_PTR(bp, (xfs_caddr_t)((__psint_t)&(iclog->ic_header)+
                                            (__psint_t)count), split);
                XFS_BUF_SET_FSPRIVATE(bp, iclog);
                XFS_BUF_BUSY(bp);
                XFS_BUF_ASYNC(bp);
                if (!(log->l_mp->m_flags & XFS_MOUNT_NOLOGFLUSH))
                        XFS_BUF_FLUSH(bp);
                dptr = XFS_BUF_PTR(bp);
                /*
                 * Bump the cycle numbers at the start of each block
                 * since this part of the buffer is at the start of
                 * a new cycle.  Watch out for the header magic number
                 * case, though.
                 */
                for (i=0; i<split; i += BBSIZE) {
                        INT_MOD(*(uint *)dptr, ARCH_CONVERT, +1);
                        if (INT_GET(*(uint *)dptr, ARCH_CONVERT) == XLOG_HEADER_MAGIC_NUM)
                                INT_MOD(*(uint *)dptr, ARCH_CONVERT, +1);
                        dptr += BBSIZE;
                }

                ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1);
                ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize);

                /* account for internal log which doesn't start at block #0 */
                XFS_BUF_SET_ADDR(bp, XFS_BUF_ADDR(bp) + log->l_logBBstart);
                if ((error = XFS_bwrite(bp))) {
                        xfs_ioerror_alert("xlog_sync (split)", log->l_mp,
                                          bp, XFS_BUF_ADDR(bp));
                        return (error);
                }
        }
        return (0);
}       /* xlog_sync */
/*
 * Unallocate a log structure
 */
void
xlog_unalloc_log(xlog_t *log)
{
        xlog_in_core_t  *iclog, *next_iclog;
        xlog_ticket_t   *tic, *next_tic;
        int             i;

        iclog = log->l_iclog;
        for (i=0; i<log->l_iclog_bufs; i++) {
                sv_destroy(&iclog->ic_forcesema);
                sv_destroy(&iclog->ic_writesema);
                XFS_freerbuf(iclog->ic_bp);
#ifdef XFS_LOG_TRACE
                if (iclog->ic_trace != NULL) {
                        ktrace_free(iclog->ic_trace);
                }
#endif
                next_iclog = iclog->ic_next;
                kmem_free(iclog->hic_data, log->l_iclog_size);
                kmem_free(iclog, sizeof(xlog_in_core_t));
                iclog = next_iclog;
        }
        freesema(&log->l_flushsema);
        spinlock_destroy(&log->l_icloglock);
        spinlock_destroy(&log->l_grant_lock);

        /* XXXsup take a look at this again. */
        if ((log->l_ticket_cnt != log->l_ticket_tcnt) &&
            !XLOG_FORCED_SHUTDOWN(log)) {
                xfs_fs_cmn_err(CE_WARN, log->l_mp,
                        "xlog_unalloc_log: (cnt: %d, total: %d)",
                        log->l_ticket_cnt, log->l_ticket_tcnt);
                /* ASSERT(log->l_ticket_cnt == log->l_ticket_tcnt); */
        }

        tic = log->l_unmount_free;
        while (tic) {
                next_tic = tic->t_next;
                kmem_free(tic, NBPP);
                tic = next_tic;
        }
        XFS_freerbuf(log->l_xbuf);
#ifdef XFS_LOG_TRACE
        if (log->l_trace != NULL) {
                ktrace_free(log->l_trace);
        }
        if (log->l_grant_trace != NULL) {
                ktrace_free(log->l_grant_trace);
        }
#endif
        log->l_mp->m_log = NULL;
        kmem_free(log, sizeof(xlog_t));
}       /* xlog_unalloc_log */
/*
 * Update counters atomically now that memcpy is done.
 */
STATIC void
xlog_state_finish_copy(xlog_t           *log,
                       xlog_in_core_t   *iclog,
                       int              record_cnt,
                       int              copy_bytes)
{
        SPLDECL(s);

        s = LOG_LOCK(log);

        iclog->ic_header.h_num_logops += record_cnt;
        iclog->ic_offset += copy_bytes;

        LOG_UNLOCK(log, s);
}       /* xlog_state_finish_copy */
/*
 * Write some region out to in-core log
 *
 * This will be called when writing externally provided regions or when
 * writing out a commit record for a given transaction.
 *
 * General algorithm:
 *      1. Find total length of this write.  This may include adding to the
 *              lengths passed in.
 *      2. Check whether we violate the ticket's reservation.
 *      3. While writing to this iclog
 *          A. Reserve as much space in this iclog as can get
 *          B. If this is first write, save away start lsn
 *          C. While writing this region:
 *              1. If first write of transaction, write start record
 *              2. Write log operation header (header per region)
 *              3. Find out if we can fit entire region into this iclog
 *              4. Potentially, verify destination memcpy ptr
 *              5. Memcpy (partial) region
 *              6. If partial copy, release iclog; otherwise, continue
 *                      copying more regions into current iclog
 *          4. Mark want sync bit (in simulation mode)
 *          5. Release iclog for potential flush to on-disk log.
 *
 * ERRORS:
 * 1.   Panic if reservation is overrun.  This should never happen since
 *      reservation amounts are generated internal to the filesystem.
 * NOTES:
 * 1. Tickets are single threaded data structures.
 * 2. The XLOG_END_TRANS & XLOG_CONTINUE_TRANS flags are passed down to the
 *      syncing routine.  When a single log_write region needs to span
 *      multiple in-core logs, the XLOG_CONTINUE_TRANS bit should be set
 *      on all log operation writes which don't contain the end of the
 *      region.  The XLOG_END_TRANS bit is used for the in-core log
 *      operation which contains the end of the continued log_write region.
 * 3. When xlog_state_get_iclog_space() grabs the rest of the current iclog,
 *      we don't really know exactly how much space will be used.  As a result,
 *      we don't update ic_offset until the end when we know exactly how many
 *      bytes have been written out.
 */
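/*
 * Illustrative layout sketch (not from the original source): a two
 * region write on a freshly inited ticket lands in the iclog data area
 * as
 *
 *      [op hdr XLOG_START_TRANS][op hdr][region 0][op hdr][region 1]
 *
 * and if region 1 must be split across iclogs, the piece in this iclog
 * is flagged XLOG_CONTINUE_TRANS while the remainder in the next iclog
 * gets a fresh op header flagged XLOG_WAS_CONT_TRANS|XLOG_END_TRANS.
 */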
STATIC int
xlog_write(xfs_mount_t *        mp,
           xfs_log_iovec_t      reg[],
           int                  nentries,
           xfs_log_ticket_t     tic,
           xfs_lsn_t            *start_lsn,
           xlog_in_core_t       **commit_iclog,
           uint                 flags)
{
        xlog_t           *log    = mp->m_log;
        xlog_ticket_t    *ticket = (xlog_ticket_t *)tic;
        xlog_op_header_t *logop_head;    /* ptr to log operation header */
        xlog_in_core_t   *iclog;         /* ptr to current in-core log */
        __psint_t        ptr;            /* copy address into data region */
        int              len;            /* # xlog_write() bytes 2 still copy */
        int              index;          /* region index currently copying */
        int              log_offset;     /* offset (from 0) into data region */
        int              start_rec_copy; /* # bytes to copy for start record */
        int              partial_copy;   /* did we split a region? */
        int              partial_copy_len;/* # bytes copied if split region */
        int              need_copy;      /* # bytes need to memcpy this region */
        int              copy_len;       /* # bytes actually memcpy'ing */
        int              copy_off;       /* # bytes from entry start */
        int              contwr;         /* continued write of in-core log? */
        int              firstwr = 0;    /* first write of transaction */
        int              error;
        int              record_cnt = 0, data_cnt = 0;

        partial_copy_len = partial_copy = 0;

        /* Calculate potential maximum space.  Each region gets its own
         * xlog_op_header_t and may need to be double word aligned.
         */
        len = 0;
        if (ticket->t_flags & XLOG_TIC_INITED)    /* acct for start rec of xact */
                len += sizeof(xlog_op_header_t);

        for (index = 0; index < nentries; index++) {
                len += sizeof(xlog_op_header_t);    /* each region gets >= 1 */
                len += reg[index].i_len;
        }
        contwr = *start_lsn = 0;

        if (ticket->t_curr_res < len) {
#ifdef DEBUG
                xlog_panic(
        "xfs_log_write: reservation ran out. Need to up reservation");
#else
                /* Customer configurable panic */
                xfs_cmn_err(XFS_PTAG_LOGRES, CE_ALERT, mp,
        "xfs_log_write: reservation ran out. Need to up reservation");
                /* If we did not panic, shutdown the filesystem */
                xfs_force_shutdown(mp, XFS_CORRUPT_INCORE);
#endif
        } else
                ticket->t_curr_res -= len;

        for (index = 0; index < nentries; ) {
                if ((error = xlog_state_get_iclog_space(log, len, &iclog, ticket,
                                                        &contwr, &log_offset)))
                        return (error);

                ASSERT(log_offset <= iclog->ic_size - 1);
                ptr = (__psint_t) ((char *)iclog->ic_datap+log_offset);

                /* start_lsn is the first lsn written to. That's all we need. */
                if (! *start_lsn)
                        *start_lsn = INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT);

                /* This loop writes out as many regions as can fit in the amount
                 * of space which was allocated by xlog_state_get_iclog_space().
                 */
                while (index < nentries) {
                        ASSERT(reg[index].i_len % sizeof(__int32_t) == 0);
                        ASSERT((__psint_t)ptr % sizeof(__int32_t) == 0);
                        start_rec_copy = 0;

                        /* If first write for transaction, insert start record.
                         * We can't be trying to commit if we are inited.  We can't
                         * have any "partial_copy" if we are inited.
                         */
                        if (ticket->t_flags & XLOG_TIC_INITED) {
                                logop_head              = (xlog_op_header_t *)ptr;
                                INT_SET(logop_head->oh_tid, ARCH_CONVERT, ticket->t_tid);
                                logop_head->oh_clientid = ticket->t_clientid;
                                INT_ZERO(logop_head->oh_len, ARCH_CONVERT);
                                logop_head->oh_flags    = XLOG_START_TRANS;
                                INT_ZERO(logop_head->oh_res2, ARCH_CONVERT);
                                ticket->t_flags         &= ~XLOG_TIC_INITED;    /* clear bit */
                                firstwr = 1;                    /* increment log ops below */

                                start_rec_copy = sizeof(xlog_op_header_t);
                                xlog_write_adv_cnt(ptr, len, log_offset, start_rec_copy);
                        }

                        /* Copy log operation header directly into data section */
                        logop_head                      = (xlog_op_header_t *)ptr;
                        INT_SET(logop_head->oh_tid, ARCH_CONVERT, ticket->t_tid);
                        logop_head->oh_clientid         = ticket->t_clientid;
                        INT_ZERO(logop_head->oh_res2, ARCH_CONVERT);

                        /* header copied directly */
                        xlog_write_adv_cnt(ptr, len, log_offset, sizeof(xlog_op_header_t));

                        /* are we copying a commit or unmount record? */
                        logop_head->oh_flags = flags;

                        /*
                         * We've seen logs corrupted with bad transaction client
                         * ids.  This makes sure that XFS doesn't generate them.
                         * Turn this into an EIO and shut down the filesystem.
                         */
                        switch (logop_head->oh_clientid)  {
                        case XFS_TRANSACTION:
                        case XFS_VOLUME:
                        case XFS_LOG:
                                break;
                        default:
                                xfs_fs_cmn_err(CE_WARN, mp,
                                    "Bad XFS transaction clientid 0x%x in ticket 0x%p",
                                    logop_head->oh_clientid, tic);
                                return XFS_ERROR(EIO);
                        }

                        /* Partial write last time? => (partial_copy != 0)
                         * need_copy is the amount we'd like to copy if everything could
                         * fit in the current memcpy.
                         */
                        need_copy = reg[index].i_len - partial_copy_len;

                        copy_off = partial_copy_len;
                        if (need_copy <= iclog->ic_size - log_offset) { /*complete write */
                                INT_SET(logop_head->oh_len, ARCH_CONVERT, copy_len = need_copy);
                                if (partial_copy)
                                        logop_head->oh_flags |= (XLOG_END_TRANS|XLOG_WAS_CONT_TRANS);
                                partial_copy_len = partial_copy = 0;
                        } else {                                        /* partial write */
                                copy_len = iclog->ic_size - log_offset;
                                INT_SET(logop_head->oh_len, ARCH_CONVERT, copy_len);
                                logop_head->oh_flags |= XLOG_CONTINUE_TRANS;
                                if (partial_copy)
                                        logop_head->oh_flags |= XLOG_WAS_CONT_TRANS;
                                partial_copy_len += copy_len;
                                partial_copy++;
                                len += sizeof(xlog_op_header_t); /* from splitting of region */
                                /* account for new log op header */
                                ticket->t_curr_res -= sizeof(xlog_op_header_t);
                        }
                        xlog_verify_dest_ptr(log, ptr);

                        /* copy region */
                        ASSERT(copy_len >= 0);
                        memcpy((xfs_caddr_t)ptr, reg[index].i_addr + copy_off, copy_len);
                        xlog_write_adv_cnt(ptr, len, log_offset, copy_len);

                        /* make copy_len total bytes copied, including headers */
                        copy_len += start_rec_copy + sizeof(xlog_op_header_t);
                        record_cnt++;
                        data_cnt += contwr ? copy_len : 0;
                        if (partial_copy) {                     /* copied partial region */
                                /* already marked WANT_SYNC by xlog_state_get_iclog_space */
                                xlog_state_finish_copy(log, iclog, record_cnt, data_cnt);
                                record_cnt = data_cnt = 0;
                                if ((error = xlog_state_release_iclog(log, iclog)))
                                        return (error);
                                break;                  /* don't increment index */
                        } else {                                /* copied entire region */
                                index++;
                                partial_copy_len = partial_copy = 0;

                                if (iclog->ic_size - log_offset <= sizeof(xlog_op_header_t)) {
                                        xlog_state_finish_copy(log, iclog, record_cnt, data_cnt);
                                        record_cnt = data_cnt = 0;
                                        xlog_state_want_sync(log, iclog);
                                        if (commit_iclog) {
                                                ASSERT(flags & XLOG_COMMIT_TRANS);
                                                *commit_iclog = iclog;
                                        } else if ((error = xlog_state_release_iclog(log, iclog)))
                                                return (error);
                                        if (index == nentries)
                                                return 0;       /* we are done */
                                        else
                                                break;
                                }
                        } /* if (partial_copy) */
                } /* while (index < nentries) */
        } /* for (index = 0; index < nentries; ) */

        ASSERT(len == 0);

        xlog_state_finish_copy(log, iclog, record_cnt, data_cnt);
        if (commit_iclog) {
                ASSERT(flags & XLOG_COMMIT_TRANS);
                *commit_iclog = iclog;
                return (0);
        }
        return (xlog_state_release_iclog(log, iclog));
}       /* xlog_write */
/*****************************************************************************
 *
 *              State Machine functions
 *
 *****************************************************************************
 */
/* Clean iclogs starting from the head.  This ordering must be
 * maintained, so an iclog doesn't become ACTIVE beyond one that
 * is SYNCING.  This is also required to maintain the notion that we use
 * a counting semaphore to hold off would-be writers to the log when every
 * iclog is trying to sync to disk.
 *
 * State Change: DIRTY -> ACTIVE
 */
STATIC void
xlog_state_clean_log(xlog_t *log)
{
        xlog_in_core_t  *iclog;
        int changed = 0;

        iclog = log->l_iclog;
        do {
                if (iclog->ic_state == XLOG_STATE_DIRTY) {
                        iclog->ic_state = XLOG_STATE_ACTIVE;
                        iclog->ic_offset       = 0;
                        iclog->ic_callback      = 0;   /* don't need to free */
                        /*
                         * If the number of ops in this iclog indicate it just
                         * contains the dummy transaction, we can
                         * change state into IDLE (the second time around).
                         * Otherwise we should change the state into
                         * NEED a dummy.
                         * We don't need to cover the dummy.
                         */
                        if (!changed &&
                           (INT_GET(iclog->ic_header.h_num_logops, ARCH_CONVERT) == XLOG_COVER_OPS)) {
                                changed = 1;
                        } else {
                                /*
                                 * We have two dirty iclogs so start over
                                 * This could also be num of ops indicates
                                 * this is not the dummy going out.
                                 */
                                changed = 2;
                        }
                        INT_ZERO(iclog->ic_header.h_num_logops, ARCH_CONVERT);
                        memset(iclog->ic_header.h_cycle_data, 0,
                              sizeof(iclog->ic_header.h_cycle_data));
                        INT_ZERO(iclog->ic_header.h_lsn, ARCH_CONVERT);
                } else if (iclog->ic_state == XLOG_STATE_ACTIVE)
                        /* do nothing */;
                else
                        break;  /* stop cleaning */
                iclog = iclog->ic_next;
        } while (iclog != log->l_iclog);

        /* log is locked when we are called */
        /*
         * Change state for the dummy log recording.
         * We usually go to NEED. But we go to NEED2 if the changed indicates
         * we are done writing the dummy record.
         * If we are done with the second dummy record (DONE2), then
         * we go to IDLE.
         */
        if (changed) {
                switch (log->l_covered_state) {
                case XLOG_STATE_COVER_IDLE:
                case XLOG_STATE_COVER_NEED:
                case XLOG_STATE_COVER_NEED2:
                        log->l_covered_state = XLOG_STATE_COVER_NEED;
                        break;

                case XLOG_STATE_COVER_DONE:
                        if (changed == 1)
                                log->l_covered_state = XLOG_STATE_COVER_NEED2;
                        else
                                log->l_covered_state = XLOG_STATE_COVER_NEED;
                        break;

                case XLOG_STATE_COVER_DONE2:
                        if (changed == 1)
                                log->l_covered_state = XLOG_STATE_COVER_IDLE;
                        else
                                log->l_covered_state = XLOG_STATE_COVER_NEED;
                        break;

                default:
                        ASSERT(0);
                }
        }
}       /* xlog_state_clean_log */
STATIC xfs_lsn_t
xlog_get_lowest_lsn(
        xlog_t          *log)
{
        xlog_in_core_t  *lsn_log;
        xfs_lsn_t       lowest_lsn, lsn;

        lsn_log = log->l_iclog;
        lowest_lsn = 0;
        do {
            if (!(lsn_log->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_DIRTY))) {
                lsn = INT_GET(lsn_log->ic_header.h_lsn, ARCH_CONVERT);
                if ((lsn && !lowest_lsn) ||
                    (XFS_LSN_CMP_ARCH(lsn, lowest_lsn, ARCH_NOCONVERT) < 0)) {
                        lowest_lsn = lsn;
                }
            }
            lsn_log = lsn_log->ic_next;
        } while (lsn_log != log->l_iclog);
        return(lowest_lsn);
}
STATIC void
xlog_state_do_callback(
	xlog_t		*log,
	int		aborted,
	xlog_in_core_t	*ciclog)
{
	xlog_in_core_t	   *iclog;
	xlog_in_core_t	   *first_iclog;	/* used to know when we've
						 * processed all iclogs once */
	xfs_log_callback_t *cb, *cb_next;
	int		   flushcnt = 0;
	xfs_lsn_t	   lowest_lsn;
	int		   ioerrors;	/* counter: iclogs with errors */
	int		   loopdidcallbacks; /* flag: inner loop did callbacks*/
	int		   funcdidcallbacks; /* flag: function did callbacks */
	int		   repeats;	/* for issuing console warnings if
					 * looping too many times */
	SPLDECL(s);

	s = LOG_LOCK(log);
	first_iclog = iclog = log->l_iclog;
	ioerrors = 0;
	funcdidcallbacks = 0;
	repeats = 0;

	do {
		/*
		 * Scan all iclogs starting with the one pointed to by the
		 * log.  Reset this starting point each time the log is
		 * unlocked (during callbacks).
		 *
		 * Keep looping through iclogs until one full pass is made
		 * without running any callbacks.
		 */
		first_iclog = log->l_iclog;
		iclog = log->l_iclog;
		loopdidcallbacks = 0;
		repeats++;

		do {
			/* skip all iclogs in the ACTIVE & DIRTY states */
			if (iclog->ic_state &
			    (XLOG_STATE_ACTIVE|XLOG_STATE_DIRTY)) {
				iclog = iclog->ic_next;
				continue;
			}

			/*
			 * Between marking a filesystem SHUTDOWN and stopping
			 * the log, we do flush all iclogs to disk (if there
			 * wasn't a log I/O error). So, we do want things to
			 * go smoothly in case of just a SHUTDOWN w/o a
			 * LOG_IO_ERROR.
			 */
			if (!(iclog->ic_state & XLOG_STATE_IOERROR)) {
				/*
				 * Can only perform callbacks in order.  Since
				 * this iclog is not in the DONE_SYNC/
				 * DO_CALLBACK state, we skip the rest and
				 * just try to clean up.  If we set our iclog
				 * to DO_CALLBACK, we will not process it when
				 * we retry since a previous iclog is in the
				 * CALLBACK and the state cannot change since
				 * we are holding the LOG_LOCK.
				 */
				if (!(iclog->ic_state &
				      (XLOG_STATE_DONE_SYNC |
				       XLOG_STATE_DO_CALLBACK))) {
					if (ciclog && (ciclog->ic_state ==
							XLOG_STATE_DONE_SYNC)) {
						ciclog->ic_state = XLOG_STATE_DO_CALLBACK;
					}
					break;
				}
				/*
				 * We now have an iclog that is in either the
				 * DO_CALLBACK or DONE_SYNC states. The other
				 * states (WANT_SYNC, SYNCING, or CALLBACK) were
				 * caught by the above if and are going to
				 * clean (i.e. we aren't doing their callbacks).
				 *
				 * We will do one more check here to see if we
				 * have chased our tail around.
				 */
				lowest_lsn = xlog_get_lowest_lsn(log);
				if (lowest_lsn && (
					XFS_LSN_CMP_ARCH(
						lowest_lsn,
						INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT),
						ARCH_NOCONVERT
					) < 0)) {
					iclog = iclog->ic_next;
					continue; /* Leave this iclog for
						   * another thread */
				}

				iclog->ic_state = XLOG_STATE_CALLBACK;

				LOG_UNLOCK(log, s);

				/* l_last_sync_lsn field protected by
				 * GRANT_LOCK. Don't worry about iclog's lsn.
				 * No one else can be here except us.
				 */
				s = GRANT_LOCK(log);
				ASSERT(XFS_LSN_CMP_ARCH(
						log->l_last_sync_lsn,
						INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT),
						ARCH_NOCONVERT
					) <= 0);
				log->l_last_sync_lsn = INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT);
				GRANT_UNLOCK(log, s);

				/*
				 * Keep processing entries in the callback list
				 * until we come around and it is empty.  We
				 * need to atomically see that the list is
				 * empty and change the state to DIRTY so that
				 * we don't miss any more callbacks being added.
				 */
				s = LOG_LOCK(log);
			} else {
				ioerrors++;
			}
			cb = iclog->ic_callback;

			while (cb != 0) {
				iclog->ic_callback_tail = &(iclog->ic_callback);
				iclog->ic_callback = 0;
				LOG_UNLOCK(log, s);

				/* perform callbacks in the order given */
				for (; cb != 0; cb = cb_next) {
					cb_next = cb->cb_next;
					cb->cb_func(cb->cb_arg, aborted);
				}
				s = LOG_LOCK(log);
				cb = iclog->ic_callback;
			}

			loopdidcallbacks++;
			funcdidcallbacks++;

			ASSERT(iclog->ic_callback == 0);
			if (!(iclog->ic_state & XLOG_STATE_IOERROR))
				iclog->ic_state = XLOG_STATE_DIRTY;

			/*
			 * Transition from DIRTY to ACTIVE if applicable.
			 * NOP if STATE_IOERROR.
			 */
			xlog_state_clean_log(log);

			/* wake up threads waiting in xfs_log_force() */
			sv_broadcast(&iclog->ic_forcesema);

			iclog = iclog->ic_next;
		} while (first_iclog != iclog);

		if (repeats && (repeats % 10) == 0) {
			xfs_fs_cmn_err(CE_WARN, log->l_mp,
				"xlog_state_do_callback: looping %d", repeats);
		}
	} while (!ioerrors && loopdidcallbacks);

	/*
	 * make one last gasp attempt to see if iclogs are being left in
	 * limbo.
	 */
#ifdef DEBUG
	if (funcdidcallbacks) {
		first_iclog = iclog = log->l_iclog;
		do {
			ASSERT(iclog->ic_state != XLOG_STATE_DO_CALLBACK);
			/*
			 * Terminate the loop if iclogs are found in states
			 * which will cause other threads to clean up iclogs.
			 *
			 * SYNCING - i/o completion will go through logs
			 * DONE_SYNC - interrupt thread should be waiting for
			 *             LOG_LOCK
			 * IOERROR - give up hope all ye who enter here
			 */
			if (iclog->ic_state == XLOG_STATE_SYNCING ||
			    iclog->ic_state == XLOG_STATE_DONE_SYNC ||
			    iclog->ic_state == XLOG_STATE_IOERROR)
				break;
			iclog = iclog->ic_next;
		} while (first_iclog != iclog);
	}
#endif

	if (log->l_iclog->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_IOERROR)) {
		flushcnt = log->l_flushcnt;
		log->l_flushcnt = 0;
	}
	LOG_UNLOCK(log, s);
	while (flushcnt--)
		vsema(&log->l_flushsema);
}	/* xlog_state_do_callback */
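
/*
 * Note on the callback loop above: the list is detached (ic_callback
 * zeroed, the tail pointer reset) while the LOG_LOCK is held, and only
 * then is the lock dropped to run each cb_func().  Callbacks queued
 * concurrently land on the now-empty ic_callback list and are picked up
 * by the next iteration of the while loop, so the final "list is empty,
 * go DIRTY" decision is made atomically under the lock.
 */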
/*
 * Finish transitioning this iclog to the dirty state.
 *
 * Make sure that we completely execute this routine only when this is
 * the last call to the iclog.  There is a good chance that iclog flushes,
 * when we reach the end of the physical log, get turned into 2 separate
 * calls to bwrite.  Hence, one iclog flush could generate two calls to this
 * routine.  By using the reference count bwritecnt, we guarantee that only
 * the second completion goes through.
 *
 * Callbacks could take time, so they are done outside the scope of the
 * global state machine log lock.  Assume that the calls to cvsema won't
 * take a long time.  At least we know it won't sleep.
 */
STATIC void
xlog_state_done_syncing(
	xlog_in_core_t	*iclog,
	int		aborted)
{
	xlog_t		*log = iclog->ic_log;
	SPLDECL(s);

	s = LOG_LOCK(log);

	ASSERT(iclog->ic_state == XLOG_STATE_SYNCING ||
	       iclog->ic_state == XLOG_STATE_IOERROR);
	ASSERT(iclog->ic_refcnt == 0);
	ASSERT(iclog->ic_bwritecnt == 1 || iclog->ic_bwritecnt == 2);

	/*
	 * If we got an error, either on the first buffer, or in the case of
	 * split log writes, on the second, we mark ALL iclogs STATE_IOERROR,
	 * and none should ever be attempted to be written to disk
	 * again.
	 */
	if (iclog->ic_state != XLOG_STATE_IOERROR) {
		if (--iclog->ic_bwritecnt == 1) {
			LOG_UNLOCK(log, s);
			return;
		}
		iclog->ic_state = XLOG_STATE_DONE_SYNC;
	}

	/*
	 * Someone could be sleeping prior to writing out the next
	 * iclog buffer, we wake them all, one will get to do the
	 * I/O, the others get to wait for the result.
	 */
	sv_broadcast(&iclog->ic_writesema);
	LOG_UNLOCK(log, s);
	xlog_state_do_callback(log, aborted, iclog);	/* also cleans log */
}	/* xlog_state_done_syncing */
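
/*
 * Worked example of the bwritecnt handshake above (split-write case): an
 * iclog that wraps the end of the physical log is issued as two bwrites,
 * so ic_bwritecnt == 2.  The first completion decrements it to 1 and
 * returns without changing state; the second decrements it to 0, falls
 * through, and moves the iclog to DONE_SYNC.  An unsplit flush starts at
 * ic_bwritecnt == 1 and proceeds immediately.
 */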
/*
 * If the head of the in-core log ring is not (ACTIVE or DIRTY), then we must
 * sleep.  The flush semaphore is set to the number of in-core buffers and
 * decremented around disk syncing.  Therefore, if all buffers are syncing,
 * this semaphore will cause new writes to sleep until a sync completes.
 * Otherwise, this code just does p() followed by v().  This approximates
 * a sleep/wakeup except we can't race.
 *
 * The in-core logs are used in a circular fashion.  They are not used
 * out-of-order even when an iclog past the head is free.
 *
 * return:
 *	* log_offset where xlog_write() can start writing into the in-core
 *		log's data space.
 *	* in-core log pointer to which xlog_write() should write.
 *	* boolean indicating this is a continued write to an in-core log.
 *		If this is the last write, then the in-core log's offset field
 *		needs to be incremented, depending on the amount of data which
 *		is copied.
 */
STATIC int
xlog_state_get_iclog_space(xlog_t	  *log,
			   int		  len,
			   xlog_in_core_t **iclogp,
			   xlog_ticket_t  *ticket,
			   int		  *continued_write,
			   int		  *logoffsetp)
{
	SPLDECL(s);
	int		  log_offset;
	xlog_rec_header_t *head;
	xlog_in_core_t	  *iclog;
	int		  error;

restart:
	s = LOG_LOCK(log);
	if (XLOG_FORCED_SHUTDOWN(log)) {
		LOG_UNLOCK(log, s);
		return XFS_ERROR(EIO);
	}

	iclog = log->l_iclog;
	if (! (iclog->ic_state == XLOG_STATE_ACTIVE)) {
		log->l_flushcnt++;
		LOG_UNLOCK(log, s);
		xlog_trace_iclog(iclog, XLOG_TRACE_SLEEP_FLUSH);
		XFS_STATS_INC(xfsstats.xs_log_noiclogs);
		/* Ensure that log writes happen */
		psema(&log->l_flushsema, PINOD);
		goto restart;
	}
	ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
	head = &iclog->ic_header;

	iclog->ic_refcnt++;			/* prevents sync */
	log_offset = iclog->ic_offset;

	/* On the 1st write to an iclog, figure out lsn.  This works
	 * if iclogs marked XLOG_STATE_WANT_SYNC always write out what they are
	 * committing to.  If the offset is set, that's how many blocks
	 * must be written.
	 */
	if (log_offset == 0) {
		ticket->t_curr_res -= log->l_iclog_hsize;
		INT_SET(head->h_cycle, ARCH_CONVERT, log->l_curr_cycle);
		ASSIGN_LSN(head->h_lsn, log, ARCH_CONVERT);
		ASSERT(log->l_curr_block >= 0);

		/* round off error from last write with this iclog */
		ticket->t_curr_res -= iclog->ic_roundoff;
		log->l_roundoff -= iclog->ic_roundoff;
		iclog->ic_roundoff = 0;
	}

	/* If there is enough room to write everything, then do it.  Otherwise,
	 * claim the rest of the region and make sure the XLOG_STATE_WANT_SYNC
	 * bit is on, so this will get flushed out.  Don't update ic_offset
	 * until you know exactly how many bytes get copied.  Therefore, wait
	 * until later to update ic_offset.
	 *
	 * xlog_write() algorithm assumes that at least 2 xlog_op_header_t's
	 * can fit into remaining data section.
	 */
	if (iclog->ic_size - iclog->ic_offset < 2*sizeof(xlog_op_header_t)) {
		xlog_state_switch_iclogs(log, iclog, iclog->ic_size);

		/* If I'm the only one writing to this iclog, sync it to disk */
		if (iclog->ic_refcnt == 1) {
			LOG_UNLOCK(log, s);
			if ((error = xlog_state_release_iclog(log, iclog)))
				return (error);
		} else {
			iclog->ic_refcnt--;
			LOG_UNLOCK(log, s);
		}
		goto restart;
	}

	/* Do we have enough room to write the full amount in the remainder
	 * of this iclog?  Or must we continue a write on the next iclog and
	 * mark this iclog as completely taken?  In the case where we switch
	 * iclogs (to mark it taken), this particular iclog will release/sync
	 * to disk in xlog_write().
	 */
	if (len <= iclog->ic_size - iclog->ic_offset) {
		*continued_write = 0;
		iclog->ic_offset += len;
	} else {
		*continued_write = 1;
		xlog_state_switch_iclogs(log, iclog, iclog->ic_size);
	}
	*iclogp = iclog;

	ASSERT(iclog->ic_offset <= iclog->ic_size);
	LOG_UNLOCK(log, s);

	*logoffsetp = log_offset;
	return 0;
}	/* xlog_state_get_iclog_space */
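
/*
 * Headroom arithmetic for the 2-op-header test above, with hypothetical
 * numbers: if ic_size is 32768, ic_offset is 32752 and
 * sizeof(xlog_op_header_t) is 12, then 32768 - 32752 = 16 < 24, so the
 * iclog is switched out even though a few bytes remain; xlog_write()
 * must always be able to place at least two more operation headers in
 * the remaining data section.
 */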
/*
 * Atomically get the log space required for a log ticket.
 *
 * Once a ticket gets put onto the reserveq, it will only return after
 * the needed reservation is satisfied.
 */
STATIC int
xlog_grant_log_space(xlog_t	   *log,
		     xlog_ticket_t *tic)
{
	int		free_bytes;
	int		need_bytes;
	SPLDECL(s);
#ifdef DEBUG
	xfs_lsn_t	tail_lsn;
#endif

#ifdef DEBUG
	if (log->l_flags & XLOG_ACTIVE_RECOVERY)
		panic("grant Recovery problem");
#endif

	/* Is there space or do we need to sleep? */
	s = GRANT_LOCK(log);
	xlog_trace_loggrant(log, tic, "xlog_grant_log_space: enter");

	/* something is already sleeping; insert new transaction at end */
	if (log->l_reserve_headq) {
		XLOG_INS_TICKETQ(log->l_reserve_headq, tic);
		xlog_trace_loggrant(log, tic,
				    "xlog_grant_log_space: sleep 1");
		/*
		 * Gotta check this before going to sleep, while we're
		 * holding the grant lock.
		 */
		if (XLOG_FORCED_SHUTDOWN(log))
			goto error_return;

		XFS_STATS_INC(xfsstats.xs_sleep_logspace);
		sv_wait(&tic->t_sema, PINOD|PLTWAIT, &log->l_grant_lock, s);
		/*
		 * If we got an error, and the filesystem is shutting down,
		 * we'll catch it down below. So just continue...
		 */
		xlog_trace_loggrant(log, tic,
				    "xlog_grant_log_space: wake 1");
		s = GRANT_LOCK(log);
	}
	if (tic->t_flags & XFS_LOG_PERM_RESERV)
		need_bytes = tic->t_unit_res*tic->t_ocnt;
	else
		need_bytes = tic->t_unit_res;

redo:
	if (XLOG_FORCED_SHUTDOWN(log))
		goto error_return;

	free_bytes = xlog_space_left(log, log->l_grant_reserve_cycle,
				     log->l_grant_reserve_bytes);
	if (free_bytes < need_bytes) {
		if ((tic->t_flags & XLOG_TIC_IN_Q) == 0)
			XLOG_INS_TICKETQ(log->l_reserve_headq, tic);
		xlog_trace_loggrant(log, tic,
				    "xlog_grant_log_space: sleep 2");
		XFS_STATS_INC(xfsstats.xs_sleep_logspace);
		sv_wait(&tic->t_sema, PINOD|PLTWAIT, &log->l_grant_lock, s);

		if (XLOG_FORCED_SHUTDOWN(log)) {
			s = GRANT_LOCK(log);
			goto error_return;
		}

		xlog_trace_loggrant(log, tic,
				    "xlog_grant_log_space: wake 2");
		xlog_grant_push_ail(log->l_mp, need_bytes);
		s = GRANT_LOCK(log);
		goto redo;
	} else if (tic->t_flags & XLOG_TIC_IN_Q)
		XLOG_DEL_TICKETQ(log->l_reserve_headq, tic);

	/* we've got enough space */
	XLOG_GRANT_ADD_SPACE(log, need_bytes, 'w');
	XLOG_GRANT_ADD_SPACE(log, need_bytes, 'r');
#ifdef DEBUG
	tail_lsn = log->l_tail_lsn;
	/*
	 * Check to make sure the grant write head didn't just overlap the
	 * tail.  If the cycles are the same, we can't be overlapping.
	 * Otherwise, make sure that the cycles differ by exactly one and
	 * check the byte count.
	 */
	if (CYCLE_LSN(tail_lsn, ARCH_NOCONVERT) != log->l_grant_write_cycle) {
		ASSERT(log->l_grant_write_cycle-1 == CYCLE_LSN(tail_lsn, ARCH_NOCONVERT));
		ASSERT(log->l_grant_write_bytes <= BBTOB(BLOCK_LSN(tail_lsn, ARCH_NOCONVERT)));
	}
#endif
	xlog_trace_loggrant(log, tic, "xlog_grant_log_space: exit");
	xlog_verify_grant_head(log, 1);
	GRANT_UNLOCK(log, s);
	return (0);

 error_return:
	if (tic->t_flags & XLOG_TIC_IN_Q)
		XLOG_DEL_TICKETQ(log->l_reserve_headq, tic);
	xlog_trace_loggrant(log, tic, "xlog_grant_log_space: err_ret");
	/*
	 * If we are failing, make sure the ticket doesn't have any
	 * current reservations. We don't want to add this back when
	 * the ticket/transaction gets cancelled.
	 */
	tic->t_curr_res = 0;
	tic->t_cnt = 0;	/* ungrant will give back unit_res * t_cnt. */
	GRANT_UNLOCK(log, s);
	return XFS_ERROR(EIO);
}	/* xlog_grant_log_space */
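
/*
 * Example of the need_bytes calculation above (values hypothetical): a
 * permanent-reservation ticket with t_unit_res == 8192 and t_ocnt == 3
 * waits until 3 * 8192 = 24576 bytes are free before it is granted; an
 * ordinary ticket waits only for its single 8192-byte unit.
 */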
/*
 * Replenish the byte reservation required by moving the grant write head.
 */
STATIC int
xlog_regrant_write_log_space(xlog_t	   *log,
			     xlog_ticket_t *tic)
{
	SPLDECL(s);
	int		free_bytes, need_bytes;
	xlog_ticket_t	*ntic;
#ifdef DEBUG
	xfs_lsn_t	tail_lsn;
#endif

	tic->t_curr_res = tic->t_unit_res;

	if (tic->t_cnt > 0)
		return (0);

#ifdef DEBUG
	if (log->l_flags & XLOG_ACTIVE_RECOVERY)
		panic("regrant Recovery problem");
#endif

	s = GRANT_LOCK(log);
	xlog_trace_loggrant(log, tic, "xlog_regrant_write_log_space: enter");

	if (XLOG_FORCED_SHUTDOWN(log))
		goto error_return;

	/* If there are other waiters on the queue then give them a
	 * chance at logspace before us. Wake up the first waiters,
	 * if we do not wake up all the waiters then go to sleep waiting
	 * for more free space, otherwise try to get some space for
	 * this transaction.
	 */
	if ((ntic = log->l_write_headq)) {
		free_bytes = xlog_space_left(log, log->l_grant_write_cycle,
					     log->l_grant_write_bytes);
		do {
			ASSERT(ntic->t_flags & XLOG_TIC_PERM_RESERV);

			if (free_bytes < ntic->t_unit_res)
				break;
			free_bytes -= ntic->t_unit_res;
			sv_signal(&ntic->t_sema);
			ntic = ntic->t_next;
		} while (ntic != log->l_write_headq);

		if (ntic != log->l_write_headq) {
			if ((tic->t_flags & XLOG_TIC_IN_Q) == 0)
				XLOG_INS_TICKETQ(log->l_write_headq, tic);

			xlog_trace_loggrant(log, tic,
				    "xlog_regrant_write_log_space: sleep 1");
			XFS_STATS_INC(xfsstats.xs_sleep_logspace);
			sv_wait(&tic->t_sema, PINOD|PLTWAIT,
				&log->l_grant_lock, s);

			/* If we're shutting down, this tic is already
			 * off the queue */
			if (XLOG_FORCED_SHUTDOWN(log)) {
				s = GRANT_LOCK(log);
				goto error_return;
			}

			xlog_trace_loggrant(log, tic,
				    "xlog_regrant_write_log_space: wake 1");
			xlog_grant_push_ail(log->l_mp, tic->t_unit_res);
			s = GRANT_LOCK(log);
		}
	}

	need_bytes = tic->t_unit_res;

redo:
	if (XLOG_FORCED_SHUTDOWN(log))
		goto error_return;

	free_bytes = xlog_space_left(log, log->l_grant_write_cycle,
				     log->l_grant_write_bytes);
	if (free_bytes < need_bytes) {
		if ((tic->t_flags & XLOG_TIC_IN_Q) == 0)
			XLOG_INS_TICKETQ(log->l_write_headq, tic);
		XFS_STATS_INC(xfsstats.xs_sleep_logspace);
		sv_wait(&tic->t_sema, PINOD|PLTWAIT, &log->l_grant_lock, s);

		/* If we're shutting down, this tic is already off the queue */
		if (XLOG_FORCED_SHUTDOWN(log)) {
			s = GRANT_LOCK(log);
			goto error_return;
		}

		xlog_trace_loggrant(log, tic,
				    "xlog_regrant_write_log_space: wake 2");
		xlog_grant_push_ail(log->l_mp, need_bytes);
		s = GRANT_LOCK(log);
		goto redo;
	} else if (tic->t_flags & XLOG_TIC_IN_Q)
		XLOG_DEL_TICKETQ(log->l_write_headq, tic);

	XLOG_GRANT_ADD_SPACE(log, need_bytes, 'w'); /* we've got enough space */
#ifdef DEBUG
	tail_lsn = log->l_tail_lsn;
	if (CYCLE_LSN(tail_lsn, ARCH_NOCONVERT) != log->l_grant_write_cycle) {
		ASSERT(log->l_grant_write_cycle-1 == CYCLE_LSN(tail_lsn, ARCH_NOCONVERT));
		ASSERT(log->l_grant_write_bytes <= BBTOB(BLOCK_LSN(tail_lsn, ARCH_NOCONVERT)));
	}
#endif

	xlog_trace_loggrant(log, tic, "xlog_regrant_write_log_space: exit");
	xlog_verify_grant_head(log, 1);
	GRANT_UNLOCK(log, s);
	return (0);

 error_return:
	if (tic->t_flags & XLOG_TIC_IN_Q)
		XLOG_DEL_TICKETQ(log->l_reserve_headq, tic);
	xlog_trace_loggrant(log, tic, "xlog_regrant_write_log_space: err_ret");
	/*
	 * If we are failing, make sure the ticket doesn't have any
	 * current reservations. We don't want to add this back when
	 * the ticket/transaction gets cancelled.
	 */
	tic->t_curr_res = 0;
	tic->t_cnt = 0;	/* ungrant will give back unit_res * t_cnt. */
	GRANT_UNLOCK(log, s);
	return XFS_ERROR(EIO);
}	/* xlog_regrant_write_log_space */
/* The first cnt-1 times through here we don't need to
 * move the grant write head because the permanent
 * reservation has reserved cnt times the unit amount.
 * Release part of current permanent unit reservation and
 * reset current reservation to be one unit's worth.  Also
 * move grant reservation head forward.
 */
STATIC void
xlog_regrant_reserve_log_space(xlog_t	     *log,
			       xlog_ticket_t *ticket)
{
	SPLDECL(s);

	xlog_trace_loggrant(log, ticket,
			    "xlog_regrant_reserve_log_space: enter");
	if (ticket->t_cnt > 0)
		ticket->t_cnt--;

	s = GRANT_LOCK(log);
	XLOG_GRANT_SUB_SPACE(log, ticket->t_curr_res, 'w');
	XLOG_GRANT_SUB_SPACE(log, ticket->t_curr_res, 'r');
	ticket->t_curr_res = ticket->t_unit_res;
	xlog_trace_loggrant(log, ticket,
			    "xlog_regrant_reserve_log_space: sub current res");
	xlog_verify_grant_head(log, 1);

	/* just return if we still have some of the pre-reserved space */
	if (ticket->t_cnt > 0) {
		GRANT_UNLOCK(log, s);
		return;
	}

	XLOG_GRANT_ADD_SPACE(log, ticket->t_unit_res, 'r');
	xlog_trace_loggrant(log, ticket,
			    "xlog_regrant_reserve_log_space: exit");
	xlog_verify_grant_head(log, 0);
	GRANT_UNLOCK(log, s);
	ticket->t_curr_res = ticket->t_unit_res;
}	/* xlog_regrant_reserve_log_space */
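
/*
 * Accounting sketch for the routine above (values hypothetical): with
 * t_unit_res == 4096 and t_cnt == 2 on entry, t_cnt drops to 1, the
 * unused remainder of the current reservation (t_curr_res) is handed
 * back on both grant heads, and t_curr_res is refilled to a full 4096
 * unit; while t_cnt > 0 the refill costs nothing because that unit was
 * reserved up front.  Only when t_cnt reaches zero does the routine
 * re-add one unit onto the reserve head to back the refill.
 */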
/*
 * Give back the space left from a reservation.
 *
 * All the information we need to make a correct determination of space left
 * is present.  For non-permanent reservations, things are quite easy.  The
 * count should have been decremented to zero.  We only need to deal with the
 * space remaining in the current reservation part of the ticket.  If the
 * ticket contains a permanent reservation, there may be left over space which
 * needs to be released.  A count of N means that N-1 refills of the current
 * reservation can be done before we need to ask for more space.  The first
 * one goes to fill up the first current reservation.  Once we run out of
 * space, the count will stay at zero and the only space remaining will be
 * in the current reservation field.
 */
STATIC void
xlog_ungrant_log_space(xlog_t	     *log,
		       xlog_ticket_t *ticket)
{
	SPLDECL(s);

	if (ticket->t_cnt > 0)
		ticket->t_cnt--;

	s = GRANT_LOCK(log);
	xlog_trace_loggrant(log, ticket, "xlog_ungrant_log_space: enter");

	XLOG_GRANT_SUB_SPACE(log, ticket->t_curr_res, 'w');
	XLOG_GRANT_SUB_SPACE(log, ticket->t_curr_res, 'r');

	xlog_trace_loggrant(log, ticket, "xlog_ungrant_log_space: sub current");

	/* If this is a permanent reservation ticket, we may be able to free
	 * up more space based on the remaining count.
	 */
	if (ticket->t_cnt > 0) {
		ASSERT(ticket->t_flags & XLOG_TIC_PERM_RESERV);
		XLOG_GRANT_SUB_SPACE(log, ticket->t_unit_res*ticket->t_cnt, 'w');
		XLOG_GRANT_SUB_SPACE(log, ticket->t_unit_res*ticket->t_cnt, 'r');
	}

	xlog_trace_loggrant(log, ticket, "xlog_ungrant_log_space: exit");
	xlog_verify_grant_head(log, 1);
	GRANT_UNLOCK(log, s);
	xfs_log_move_tail(log->l_mp, 1);
}	/* xlog_ungrant_log_space */
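
/*
 * Worked example for the header comment above (values hypothetical): a
 * permanent ticket created with cnt == 3 and unit_res == 4096 can refill
 * t_curr_res twice before needing new space.  If it is cancelled while
 * t_cnt == 2 with t_curr_res == 1000, t_cnt is first decremented to 1,
 * the 1000 unused bytes of the current unit are given back, and then
 * 1 * 4096 for the remaining whole unit, on both the write and reserve
 * grant heads.
 */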
/*
 * Atomically put back used ticket.
 */
STATIC void
xlog_state_put_ticket(xlog_t	    *log,
		      xlog_ticket_t *tic)
{
	SPLDECL(s);

	s = LOG_LOCK(log);
	xlog_ticket_put(log, tic);
	LOG_UNLOCK(log, s);
}	/* xlog_state_put_ticket */
/*
 * Flush iclog to disk if this is the last reference to the given iclog and
 * the WANT_SYNC bit is set.
 *
 * When this function is entered, the iclog is not necessarily in the
 * WANT_SYNC state.  It may be sitting around waiting to get filled.
 */
STATIC int
xlog_state_release_iclog(xlog_t		*log,
			 xlog_in_core_t	*iclog)
{
	SPLDECL(s);
	int		sync = 0;	/* do we sync? */

	xlog_assign_tail_lsn(log->l_mp);

	s = LOG_LOCK(log);

	if (iclog->ic_state & XLOG_STATE_IOERROR) {
		LOG_UNLOCK(log, s);
		return XFS_ERROR(EIO);
	}

	ASSERT(iclog->ic_refcnt > 0);
	ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE ||
	       iclog->ic_state == XLOG_STATE_WANT_SYNC);

	if (--iclog->ic_refcnt == 0 &&
	    iclog->ic_state == XLOG_STATE_WANT_SYNC) {
		sync++;
		iclog->ic_state = XLOG_STATE_SYNCING;
		INT_SET(iclog->ic_header.h_tail_lsn, ARCH_CONVERT, log->l_tail_lsn);
		xlog_verify_tail_lsn(log, iclog, log->l_tail_lsn);
		/* cycle incremented when incrementing curr_block */
	}

	LOG_UNLOCK(log, s);

	/*
	 * We let the log lock go, so it's possible that we hit a log I/O
	 * error or some other SHUTDOWN condition that marks the iclog
	 * as XLOG_STATE_IOERROR before the bwrite.  However, we know that
	 * this iclog has consistent data, so we ignore IOERROR
	 * flags after this point.
	 */
	if (sync) {
		return xlog_sync(log, iclog);
	}
	return (0);
}	/* xlog_state_release_iclog */
/*
 * This routine will mark the current iclog in the ring as WANT_SYNC
 * and move the current iclog pointer to the next iclog in the ring.
 * When this routine is called from xlog_state_get_iclog_space(), the
 * exact size of the iclog has not yet been determined.  All we know is
 * that every data block is claimed: we have run out of space in this
 * log record.
 */
STATIC void
xlog_state_switch_iclogs(xlog_t		*log,
			 xlog_in_core_t	*iclog,
			 int		eventual_size)
{
	int roundup;

	ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
	if (!eventual_size)
		eventual_size = iclog->ic_offset;
	iclog->ic_state = XLOG_STATE_WANT_SYNC;
	INT_SET(iclog->ic_header.h_prev_block, ARCH_CONVERT, log->l_prev_block);
	log->l_prev_block = log->l_curr_block;
	log->l_prev_cycle = log->l_curr_cycle;

	/* roll log?: ic_offset changed later */
	log->l_curr_block += BTOBB(eventual_size)+BTOBB(log->l_iclog_hsize);

	/* Round up to next log-sunit */
	if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) {
		if (log->l_curr_block & (log->l_stripemask - 1)) {
			roundup = log->l_stripemask -
				(log->l_curr_block & (log->l_stripemask - 1));
			log->l_curr_block += roundup;
		}
	}

	if (log->l_curr_block >= log->l_logBBsize) {
		log->l_curr_cycle++;
		if (log->l_curr_cycle == XLOG_HEADER_MAGIC_NUM)
			log->l_curr_cycle++;
		log->l_curr_block -= log->l_logBBsize;
		ASSERT(log->l_curr_block >= 0);
	}
	ASSERT(iclog == log->l_iclog);
	log->l_iclog = iclog->ic_next;
}	/* xlog_state_switch_iclogs */
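
/*
 * Stripe-rounding example for the LOGV2 branch above (values
 * hypothetical): with l_stripemask == 8 and l_curr_block == 21,
 * 21 & 7 == 5, so roundup == 8 - 5 == 3 and the next log record starts
 * at block 24, the next stripe-unit boundary.
 */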
/*
 * Write out all data in the in-core log as of this exact moment in time.
 *
 * Data may be written to the in-core log during this call.  However,
 * we don't guarantee this data will be written out.  A change from past
 * implementation means this routine will *not* write out zero length LRs.
 *
 * Basically, we try and perform an intelligent scan of the in-core logs.
 * If we determine there is no flushable data, we just return.  There is no
 * flushable data if:
 *
 *	1. the current iclog is active and has no data; the previous iclog
 *	   is in the active or dirty state.
 *	2. the current iclog is dirty, and the previous iclog is in the
 *	   active or dirty state.
 *
 * We may sleep (call psema) if:
 *
 *	1. the current iclog is not in the active nor dirty state.
 *	2. the current iclog is dirty, and the previous iclog is not in the
 *	   active nor dirty state.
 *	3. the current iclog is active, and there is another thread writing
 *	   to this particular iclog.
 *	4. a) the current iclog is active and has no other writers
 *	   b) when we return from flushing out this iclog, it is still
 *	      not in the active nor dirty state.
 */
STATIC int
xlog_state_sync_all(xlog_t *log, uint flags)
{
	xlog_in_core_t	*iclog;
	xfs_lsn_t	lsn;
	SPLDECL(s);

	s = LOG_LOCK(log);

	iclog = log->l_iclog;
	if (iclog->ic_state & XLOG_STATE_IOERROR) {
		LOG_UNLOCK(log, s);
		return XFS_ERROR(EIO);
	}

	/* If the head iclog is not active nor dirty, we just attach
	 * ourselves to the head and go to sleep.
	 */
	if (iclog->ic_state == XLOG_STATE_ACTIVE ||
	    iclog->ic_state == XLOG_STATE_DIRTY) {
		/*
		 * If the head is dirty or (active and empty), then
		 * we need to look at the previous iclog.  If the previous
		 * iclog is active or dirty we are done.  There is nothing
		 * to sync out.  Otherwise, we attach ourselves to the
		 * previous iclog and go to sleep.
		 */
		if (iclog->ic_state == XLOG_STATE_DIRTY ||
		    (iclog->ic_refcnt == 0 && iclog->ic_offset == 0)) {
			iclog = iclog->ic_prev;
			if (iclog->ic_state == XLOG_STATE_ACTIVE ||
			    iclog->ic_state == XLOG_STATE_DIRTY)
				goto no_sleep;
			else
				goto maybe_sleep;
		} else {
			if (iclog->ic_refcnt == 0) {
				/* We are the only one with access to this
				 * iclog.  Flush it out now.  There should
				 * be a roundoff of zero to show that someone
				 * has already taken care of the roundoff from
				 * the previous sync.
				 */
				ASSERT(iclog->ic_roundoff == 0);
				iclog->ic_refcnt++;
				lsn = INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT);
				xlog_state_switch_iclogs(log, iclog, 0);
				LOG_UNLOCK(log, s);

				if (xlog_state_release_iclog(log, iclog))
					return XFS_ERROR(EIO);
				s = LOG_LOCK(log);
				if (INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT) == lsn &&
				    iclog->ic_state != XLOG_STATE_DIRTY)
					goto maybe_sleep;
				else
					goto no_sleep;
			} else {
				/* Someone else is writing to this iclog.
				 * Use its call to flush out the data.  However,
				 * the other thread may not force out this LR,
				 * so we mark it WANT_SYNC.
				 */
				xlog_state_switch_iclogs(log, iclog, 0);
				goto maybe_sleep;
			}
		}
	}

	/* By the time we come around again, the iclog could've been filled
	 * which would give it another lsn.  If we have a new lsn, just
	 * return because the relevant data has been flushed.
	 */
maybe_sleep:
	if (flags & XFS_LOG_SYNC) {
		/*
		 * We must check if we're shutting down here, before
		 * we wait, while we're holding the LOG_LOCK.
		 * Then we check again after waking up, in case our
		 * sleep was disturbed by bad news.
		 */
		if (iclog->ic_state & XLOG_STATE_IOERROR) {
			LOG_UNLOCK(log, s);
			return XFS_ERROR(EIO);
		}
		XFS_STATS_INC(xfsstats.xs_log_force_sleep);
		sv_wait(&iclog->ic_forcesema, PINOD, &log->l_icloglock, s);
		/*
		 * No need to grab the log lock here since we're
		 * only deciding whether or not to return EIO
		 * and the memory read should be atomic.
		 */
		if (iclog->ic_state & XLOG_STATE_IOERROR)
			return XFS_ERROR(EIO);
	} else {
no_sleep:
		LOG_UNLOCK(log, s);
	}
	return 0;
}	/* xlog_state_sync_all */
/*
 * Used by code which implements synchronous log forces.
 *
 * Find in-core log with lsn.
 *	If it is in the DIRTY state, just return.
 *	If it is in the ACTIVE state, move the in-core log into the WANT_SYNC
 *		state and go to sleep or return.
 *	If it is in any other state, go to sleep or return.
 *
 * If filesystem activity goes to zero, the iclog will get flushed only by
 * bdflush().
 */
STATIC int
xlog_state_sync(xlog_t	  *log,
		xfs_lsn_t lsn,
		uint	  flags)
{
	xlog_in_core_t	*iclog;
	int		already_slept = 0;
	SPLDECL(s);

try_again:
	s = LOG_LOCK(log);
	iclog = log->l_iclog;

	if (iclog->ic_state & XLOG_STATE_IOERROR) {
		LOG_UNLOCK(log, s);
		return XFS_ERROR(EIO);
	}

	do {
		if (INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT) != lsn) {
			iclog = iclog->ic_next;
			continue;
		}

		if (iclog->ic_state == XLOG_STATE_DIRTY) {
			LOG_UNLOCK(log, s);
			return 0;
		}

		if (iclog->ic_state == XLOG_STATE_ACTIVE) {
			/*
			 * We sleep here if we haven't already slept (e.g.
			 * this is the first time we've looked at the correct
			 * iclog buf) and the buffer before us is going to
			 * be sync'ed.  The reason for this is that if we
			 * are doing sync transactions here, by waiting for
			 * the previous I/O to complete, we can allow a few
			 * more transactions into this iclog before we close
			 * it down.
			 *
			 * Otherwise, we mark the buffer WANT_SYNC, and bump
			 * up the refcnt so we can release the log (which drops
			 * the ref count).  The state switch keeps new transaction
			 * commits from using this buffer.  When the current commits
			 * finish writing into the buffer, the refcount will drop to
			 * zero and the buffer will go out then.
			 */
			if (!already_slept &&
			    (iclog->ic_prev->ic_state & (XLOG_STATE_WANT_SYNC |
							 XLOG_STATE_SYNCING))) {
				ASSERT(!(iclog->ic_state & XLOG_STATE_IOERROR));
				XFS_STATS_INC(xfsstats.xs_log_force_sleep);
				sv_wait(&iclog->ic_prev->ic_writesema, PSWP,
					&log->l_icloglock, s);
				already_slept = 1;
				goto try_again;
			} else {
				iclog->ic_refcnt++;
				xlog_state_switch_iclogs(log, iclog, 0);
				LOG_UNLOCK(log, s);
				if (xlog_state_release_iclog(log, iclog))
					return XFS_ERROR(EIO);
				s = LOG_LOCK(log);
			}
		}

		if ((flags & XFS_LOG_SYNC) && /* sleep */
		    !(iclog->ic_state & (XLOG_STATE_ACTIVE | XLOG_STATE_DIRTY))) {
			/*
			 * Don't wait on the forcesema if we know that we've
			 * gotten a log write error.
			 */
			if (iclog->ic_state & XLOG_STATE_IOERROR) {
				LOG_UNLOCK(log, s);
				return XFS_ERROR(EIO);
			}
			XFS_STATS_INC(xfsstats.xs_log_force_sleep);
			sv_wait(&iclog->ic_forcesema, PSWP, &log->l_icloglock, s);
			/*
			 * No need to grab the log lock here since we're
			 * only deciding whether or not to return EIO
			 * and the memory read should be atomic.
			 */
			if (iclog->ic_state & XLOG_STATE_IOERROR)
				return XFS_ERROR(EIO);
		} else {	/* just return */
			LOG_UNLOCK(log, s);
		}
		return 0;
	} while (iclog != log->l_iclog);

	LOG_UNLOCK(log, s);
	return (0);
}	/* xlog_state_sync */
/*
 * Called when we want to mark the current iclog as being ready to sync to
 * disk.
 */
STATIC void
xlog_state_want_sync(xlog_t *log, xlog_in_core_t *iclog)
{
	SPLDECL(s);

	s = LOG_LOCK(log);

	if (iclog->ic_state == XLOG_STATE_ACTIVE) {
		xlog_state_switch_iclogs(log, iclog, 0);
	} else {
		ASSERT(iclog->ic_state &
			(XLOG_STATE_WANT_SYNC|XLOG_STATE_IOERROR));
	}

	LOG_UNLOCK(log, s);
}	/* xlog_state_want_sync */
/*****************************************************************************
 *
 *		TICKET functions
 *
 *****************************************************************************
 */

/*
 *	Algorithm doesn't take into account page size. ;-(
 */
STATIC void
xlog_state_ticket_alloc(xlog_t *log)
{
	xlog_ticket_t	*t_list;
	xlog_ticket_t	*next;
	xfs_caddr_t	buf;
	uint		i = (NBPP / sizeof(xlog_ticket_t)) - 2;
	SPLDECL(s);

	/*
	 * The kmem_zalloc may sleep, so we shouldn't be holding the
	 * global lock.  XXXmiken: may want to use zone allocator.
	 */
	buf = (xfs_caddr_t) kmem_zalloc(NBPP, 0);

	s = LOG_LOCK(log);

	/* Attach 1st ticket to Q, so we can keep track of allocated memory */
	t_list = (xlog_ticket_t *)buf;
	t_list->t_next = log->l_unmount_free;
	log->l_unmount_free = t_list++;
	log->l_ticket_cnt++;
	log->l_ticket_tcnt++;

	/* Next ticket becomes first ticket attached to ticket free list */
	if (log->l_freelist != NULL) {
		ASSERT(log->l_tail != NULL);
		log->l_tail->t_next = t_list;
	} else {
		log->l_freelist = t_list;
	}
	log->l_ticket_cnt++;
	log->l_ticket_tcnt++;

	/* Cycle through rest of alloc'ed memory, building up free Q */
	for ( ; i > 0; i--) {
		next = t_list + 1;
		t_list->t_next = next;
		t_list = next;
		log->l_ticket_cnt++;
		log->l_ticket_tcnt++;
	}
	t_list->t_next = 0;
	log->l_tail = t_list;
	LOG_UNLOCK(log, s);
}	/* xlog_state_ticket_alloc */
/*
 * Put ticket into free list
 *
 * Assumption: log lock is held around this call.
 */
STATIC void
xlog_ticket_put(xlog_t		*log,
		xlog_ticket_t	*ticket)
{
	sv_destroy(&ticket->t_sema);

	/*
	 * Don't think caching will make that much difference.  It's
	 * more important to make debug easier.
	 */
#if 0
	/* real code will want to use LIFO for caching */
	ticket->t_next = log->l_freelist;
	log->l_freelist = ticket;
	/* no need to clear fields */
#else
	/* When we debug, it is easier if tickets are cycled */
	ticket->t_next = 0;
	if (log->l_tail != 0) {
		log->l_tail->t_next = ticket;
	} else {
		ASSERT(log->l_freelist == 0);
		log->l_freelist = ticket;
	}
	log->l_tail = ticket;
#endif /* DEBUG */
	log->l_ticket_cnt++;
}	/* xlog_ticket_put */
/*
 * Grab ticket off freelist or allocate some more
 */
STATIC xlog_ticket_t *
xlog_ticket_get(xlog_t		*log,
		int		unit_bytes,
		int		cnt,
		char		client,
		uint		xflags)
{
	xlog_ticket_t	*tic;
	SPLDECL(s);

 alloc:
	if (log->l_freelist == NULL)
		xlog_state_ticket_alloc(log);		/* potentially sleep */

	s = LOG_LOCK(log);
	if (log->l_freelist == NULL) {
		LOG_UNLOCK(log, s);
		goto alloc;
	}
	tic = log->l_freelist;
	log->l_freelist = tic->t_next;
	if (log->l_freelist == NULL)
		log->l_tail = NULL;
	log->l_ticket_cnt--;
	LOG_UNLOCK(log, s);

	/*
	 * Permanent reservations have up to 'cnt'-1 active log operations
	 * in the log.  A unit in this case is the amount of space for one
	 * of these log operations.  Normal reservations have a cnt of 1
	 * and their unit amount is the total amount of space required.
	 * The following line of code adds one log record header length
	 * for each part of an operation which may fall on a different
	 * log record.
	 *
	 * One more XLOG_HEADER_SIZE is added to account for possible
	 * round off errors when syncing a LR to disk.  The bytes are
	 * subtracted if the thread using this ticket is the first writer
	 * to a new LR.
	 *
	 * We add an extra log header for the possibility that the commit
	 * record is the first data written to a new log record.  In this
	 * case it is separate from the rest of the transaction data and
	 * will be charged for the log record header.
	 */
	unit_bytes += log->l_iclog_hsize * (XLOG_BTOLRBB(unit_bytes) + 2);

	tic->t_unit_res		= unit_bytes;
	tic->t_curr_res		= unit_bytes;
	tic->t_cnt		= cnt;
	tic->t_ocnt		= cnt;
	tic->t_tid		= (xlog_tid_t)((__psint_t)tic & 0xffffffff);
	tic->t_clientid		= client;
	tic->t_flags		= XLOG_TIC_INITED;
	if (xflags & XFS_LOG_PERM_RESERV)
		tic->t_flags |= XLOG_TIC_PERM_RESERV;
	sv_init(&(tic->t_sema), SV_DEFAULT, "logtick");

	return tic;
}	/* xlog_ticket_get */
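
/*
 * Worked example of the unit_bytes adjustment above (sizes
 * hypothetical): with a 512-byte log record header (l_iclog_hsize) and a
 * unit large enough that XLOG_BTOLRBB() says it may span two log
 * records, the ticket is charged 512 * (2 + 2) = 2048 extra bytes: one
 * header per record the operation may straddle, plus one for roundoff
 * and one in case the commit record opens a fresh log record.
 */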
/******************************************************************************
 *
 *		Log debug routines
 *
 ******************************************************************************
 */
#if defined(DEBUG) && !defined(XLOG_NOLOG)
/*
 * Make sure that the destination ptr is within the valid data region of
 * one of the iclogs.  This uses backup pointers stored in a different
 * part of the log in case we trash the log structure.
 */
STATIC void
xlog_verify_dest_ptr(xlog_t	*log,
		     __psint_t	ptr)
{
	int i;
	int good_ptr = 0;

	for (i=0; i < log->l_iclog_bufs; i++) {
		if (ptr >= (__psint_t)log->l_iclog_bak[i] &&
		    ptr <= (__psint_t)log->l_iclog_bak[i]+log->l_iclog_size)
			good_ptr++;
	}
	if (! good_ptr)
		xlog_panic("xlog_verify_dest_ptr: invalid ptr");
}	/* xlog_verify_dest_ptr */
/* check split LR write */
STATIC void
xlog_verify_disk_cycle_no(xlog_t	 *log,
			  xlog_in_core_t *iclog)
{
	xfs_buf_t	*bp;
	uint		cycle_no;
	xfs_caddr_t	ptr;
	xfs_daddr_t	i;

	if (BLOCK_LSN(iclog->ic_header.h_lsn, ARCH_CONVERT) < 10) {
		cycle_no = CYCLE_LSN(iclog->ic_header.h_lsn, ARCH_CONVERT);
		bp = xlog_get_bp(log, 1);
		ASSERT(bp);
		for (i = 0; i < BLOCK_LSN(iclog->ic_header.h_lsn, ARCH_CONVERT); i++) {
			xlog_bread(log, i, 1, bp);
			ptr = xlog_align(log, i, 1, bp);
			if (GET_CYCLE(ptr, ARCH_CONVERT) != cycle_no)
				xlog_warn("XFS: xlog_verify_disk_cycle_no: bad cycle no");
		}
		xlog_put_bp(bp);
	}
}	/* xlog_verify_disk_cycle_no */
STATIC void
xlog_verify_grant_head(xlog_t *log, int equals)
{
	if (log->l_grant_reserve_cycle == log->l_grant_write_cycle) {
		if (equals)
			ASSERT(log->l_grant_reserve_bytes >= log->l_grant_write_bytes);
		else
			ASSERT(log->l_grant_reserve_bytes > log->l_grant_write_bytes);
	} else {
		ASSERT(log->l_grant_reserve_cycle-1 == log->l_grant_write_cycle);
		ASSERT(log->l_grant_write_bytes >= log->l_grant_reserve_bytes);
	}
}	/* xlog_verify_grant_head */
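
/*
 * Invariant illustrated (values hypothetical): the reserve head may
 * never trail the write head.  On the same cycle, reserve at byte 9000
 * and write at byte 4000 is legal.  Once the reserve head wraps onto
 * cycle N+1 while the write head is still on cycle N, the reserve byte
 * offset (say 1000) must be at or below the write offset (say 4000),
 * because the reserve head is exactly one pass of the log ahead.
 */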
/* check if it will fit */
STATIC void
xlog_verify_tail_lsn(xlog_t	    *log,
		     xlog_in_core_t *iclog,
		     xfs_lsn_t	    tail_lsn)
{
	int blocks;

	if (CYCLE_LSN(tail_lsn, ARCH_NOCONVERT) == log->l_prev_cycle) {
		blocks =
		    log->l_logBBsize - (log->l_prev_block - BLOCK_LSN(tail_lsn, ARCH_NOCONVERT));
		if (blocks < BTOBB(iclog->ic_offset)+BTOBB(log->l_iclog_hsize))
			xlog_panic("xlog_verify_tail_lsn: ran out of log space");
	} else {
		ASSERT(CYCLE_LSN(tail_lsn, ARCH_NOCONVERT)+1 == log->l_prev_cycle);

		if (BLOCK_LSN(tail_lsn, ARCH_NOCONVERT) == log->l_prev_block)
			xlog_panic("xlog_verify_tail_lsn: tail wrapped");

		blocks = BLOCK_LSN(tail_lsn, ARCH_NOCONVERT) - log->l_prev_block;
		if (blocks < BTOBB(iclog->ic_offset) + 1)
			xlog_panic("xlog_verify_tail_lsn: ran out of log space");
	}
}	/* xlog_verify_tail_lsn */
/*
 * Perform a number of checks on the iclog before writing to disk.
 *
 * 1. Make sure the iclogs are still circular
 * 2. Make sure we have a good magic number
 * 3. Make sure we don't have magic numbers in the data
 * 4. Check fields of each log operation header for:
 *	A. Valid client identifier
 *	B. tid ptr value falls in valid ptr space (user space code)
 *	C. Length in log record header is correct according to the
 *		individual operation headers within record.
 * 5. When a bwrite will occur within 5 blocks of the front of the physical
 *	log, check the preceding blocks of the physical log to make sure all
 *	the cycle numbers agree with the current cycle number.
 */
STATIC void
xlog_verify_iclog(xlog_t	 *log,
		  xlog_in_core_t *iclog,
		  int		 count,
		  boolean_t	 syncing)
{
	xlog_op_header_t	*ophead;
	xlog_in_core_t		*icptr;
	xfs_caddr_t		ptr;
	xfs_caddr_t		base_ptr;
	__psint_t		field_offset;
	__uint8_t		clientid;
	int			len, i, j, k, op_len;
	int			idx;
	SPLDECL(s);
	union ich {
		xlog_rec_header_t	hic_header;
		xlog_rec_ext_header_t	hic_xheader;
		char			hic_sector[XLOG_HEADER_SIZE];
	} *xhdr;

	/* check validity of iclog pointers */
	s = LOG_LOCK(log);
	icptr = log->l_iclog;
	for (i=0; i < log->l_iclog_bufs; i++) {
		if (icptr == 0)
			xlog_panic("xlog_verify_iclog: illegal ptr");
		icptr = icptr->ic_next;
	}
	if (icptr != log->l_iclog)
		xlog_panic("xlog_verify_iclog: corrupt iclog ring");
	LOG_UNLOCK(log, s);

	/* check log magic numbers */
	ptr = (xfs_caddr_t) &(iclog->ic_header);
	if (INT_GET(*(uint *)ptr, ARCH_CONVERT) != XLOG_HEADER_MAGIC_NUM)
		xlog_panic("xlog_verify_iclog: illegal magic num");

	for (ptr += BBSIZE; ptr < ((xfs_caddr_t)&(iclog->ic_header))+count;
	     ptr += BBSIZE) {
		if (INT_GET(*(uint *)ptr, ARCH_CONVERT) == XLOG_HEADER_MAGIC_NUM)
			xlog_panic("xlog_verify_iclog: unexpected magic num");
	}

	/* check fields */
	len = INT_GET(iclog->ic_header.h_num_logops, ARCH_CONVERT);
	base_ptr = ptr = iclog->ic_datap;
	ophead = (xlog_op_header_t *)ptr;
	xhdr = (union ich *)&iclog->ic_header;
	for (i = 0; i < len; i++) {
		ophead = (xlog_op_header_t *)ptr;

		/* clientid is only 1 byte */
		field_offset = (__psint_t)
			       ((xfs_caddr_t)&(ophead->oh_clientid) - base_ptr);
		if (syncing == B_FALSE || (field_offset & 0x1ff)) {
			clientid = ophead->oh_clientid;
		} else {
			idx = BTOBB((xfs_caddr_t)&(ophead->oh_clientid) - iclog->ic_datap);
			if (idx > (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) {
				j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
				k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
				clientid = GET_CLIENT_ID(xhdr[j].hic_xheader.xh_cycle_data[k], ARCH_CONVERT);
			} else {
				clientid = GET_CLIENT_ID(iclog->ic_header.h_cycle_data[idx], ARCH_CONVERT);
			}
		}
		if (clientid != XFS_TRANSACTION && clientid != XFS_LOG)
			cmn_err(CE_WARN, "xlog_verify_iclog: illegal clientid %d op 0x%p offset 0x%x", clientid, ophead, field_offset);

		/* check length */
		field_offset = (__psint_t)
			       ((xfs_caddr_t)&(ophead->oh_len) - base_ptr);
		if (syncing == B_FALSE || (field_offset & 0x1ff)) {
			op_len = INT_GET(ophead->oh_len, ARCH_CONVERT);
		} else {
			idx = BTOBB((__psint_t)&ophead->oh_len -
				    (__psint_t)iclog->ic_datap);
			if (idx > (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) {
				j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
				k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
				op_len = INT_GET(xhdr[j].hic_xheader.xh_cycle_data[k], ARCH_CONVERT);
			} else {
				op_len = INT_GET(iclog->ic_header.h_cycle_data[idx], ARCH_CONVERT);
			}
		}
		ptr += sizeof(xlog_op_header_t) + op_len;
	}
}	/* xlog_verify_iclog */
#endif /* DEBUG && !XLOG_NOLOG */
/*
 * Mark all iclogs IOERROR.  LOG_LOCK is held by the caller.
 */
STATIC int
xlog_state_ioerror(
	xlog_t	*log)
{
	xlog_in_core_t	*iclog, *ic;

	iclog = log->l_iclog;
	if (! (iclog->ic_state & XLOG_STATE_IOERROR)) {
		/*
		 * Mark all the incore logs IOERROR.
		 * From now on, no log flushes will result.
		 */
		ic = iclog;
		do {
			ic->ic_state = XLOG_STATE_IOERROR;
			ic = ic->ic_next;
		} while (ic != iclog);
		return (0);
	}
	/*
	 * Return non-zero, if state transition has already happened.
	 */
	return (1);
}	/* xlog_state_ioerror */
/*
 * This is called from xfs_force_shutdown, when we're forcibly
 * shutting down the filesystem, typically because of an IO error.
 * Our main objectives here are to make sure that:
 *	a. the filesystem gets marked 'SHUTDOWN' for all interested
 *	   parties to find out, 'atomically'.
 *	b. those who're sleeping on log reservations, pinned objects and
 *	   other resources get woken up, and be told the bad news.
 *	c. nothing new gets queued up after (a) and (b) are done.
 *	d. if !logerror, flush the iclogs to disk, then seal them off
 *	   for business.
 */
int
xfs_log_force_umount(
	struct xfs_mount	*mp,
	int			logerror)
{
	xlog_ticket_t	*tic;
	xlog_t		*log;
	int		retval;
	SPLDECL(s);
	SPLDECL(s2);

	log = mp->m_log;

	/*
	 * If this happens during log recovery, don't worry about
	 * locking; the log isn't open for business yet.
	 */
	if (!log ||
	    log->l_flags & XLOG_ACTIVE_RECOVERY) {
		mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN;
		XFS_BUF_DONE(mp->m_sb_bp);
		return (0);
	}

	/*
	 * Somebody could've already done the hard work for us.
	 * No need to get locks for this.
	 */
	if (logerror && log->l_iclog->ic_state & XLOG_STATE_IOERROR) {
		ASSERT(XLOG_FORCED_SHUTDOWN(log));
		return (1);
	}
	retval = 0;
	/*
	 * We must hold both the GRANT lock and the LOG lock,
	 * before we mark the filesystem SHUTDOWN and wake
	 * everybody up to tell the bad news.
	 */
	s = GRANT_LOCK(log);
	s2 = LOG_LOCK(log);
	mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN;
	XFS_BUF_DONE(mp->m_sb_bp);
	/*
	 * This flag is sort of redundant because of the mount flag, but
	 * it's good to maintain the separation between the log and the rest
	 * of XFS.
	 */
	log->l_flags |= XLOG_IO_ERROR;

	/*
	 * If we hit a log error, we want to mark all the iclogs IOERROR
	 * while we're still holding the loglock.
	 */
	if (logerror)
		retval = xlog_state_ioerror(log);
	LOG_UNLOCK(log, s2);

	/*
	 * We don't want anybody waiting for log reservations
	 * after this. That means we have to wake up everybody
	 * queued up on reserve_headq as well as write_headq.
	 * In addition, we make sure in xlog_{re}grant_log_space
	 * that we don't enqueue anything once the SHUTDOWN flag
	 * is set, and this action is protected by the GRANTLOCK.
	 */
	if ((tic = log->l_reserve_headq)) {
		do {
			sv_signal(&tic->t_sema);
			tic = tic->t_next;
		} while (tic != log->l_reserve_headq);
	}

	if ((tic = log->l_write_headq)) {
		do {
			sv_signal(&tic->t_sema);
			tic = tic->t_next;
		} while (tic != log->l_write_headq);
	}
	GRANT_UNLOCK(log, s);

	if (! (log->l_iclog->ic_state & XLOG_STATE_IOERROR)) {
		ASSERT(!logerror);
		/*
		 * Force the incore logs to disk before shutting the
		 * log down completely.
		 */
		xlog_state_sync_all(log, XFS_LOG_FORCE|XFS_LOG_SYNC);
		s2 = LOG_LOCK(log);
		retval = xlog_state_ioerror(log);
		LOG_UNLOCK(log, s2);
	}
	/*
	 * Wake up everybody waiting on xfs_log_force.
	 * Callback all log item committed functions as if the
	 * log writes were completed.
	 */
	xlog_state_do_callback(log, XFS_LI_ABORTED, NULL);

#ifdef XFSERRORDEBUG
	{
		xlog_in_core_t	*iclog;

		s = LOG_LOCK(log);
		iclog = log->l_iclog;
		do {
			ASSERT(iclog->ic_callback == 0);
			iclog = iclog->ic_next;
		} while (iclog != log->l_iclog);
		LOG_UNLOCK(log, s);
	}
#endif
	/* return non-zero if log IOERROR transition had already happened */
	return (retval);
}	/* xfs_log_force_umount */
STATIC int
xlog_iclogs_empty(xlog_t *log)
{
	xlog_in_core_t	*iclog;

	iclog = log->l_iclog;
	do {
		/* endianness does not matter here, zero is zero in
		 * any language.
		 */
		if (iclog->ic_header.h_num_logops)
			return (0);
		iclog = iclog->ic_next;
	} while (iclog != log->l_iclog);
	return (1);
}	/* xlog_iclogs_empty */