/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * Copyright (c) 2008 Dave Chinner
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_types.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_trans_priv.h"
#include "xfs_trace.h"
#include "xfs_error.h"
#ifdef DEBUG
/*
 * Check that the list is sorted as it should be.
 */
STATIC void
xfs_ail_check(
	struct xfs_ail	*ailp,
	xfs_log_item_t	*lip)
{
	xfs_log_item_t	*prev_lip;

	if (list_empty(&ailp->xa_ail))
		return;

	/*
	 * Check the next and previous entries are valid.
	 */
	ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
	prev_lip = list_entry(lip->li_ail.prev, xfs_log_item_t, li_ail);
	if (&prev_lip->li_ail != &ailp->xa_ail)
		ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);

	prev_lip = list_entry(lip->li_ail.next, xfs_log_item_t, li_ail);
	if (&prev_lip->li_ail != &ailp->xa_ail)
		ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) >= 0);
}
#else /* !DEBUG */
#define	xfs_ail_check(a, l)
#endif /* DEBUG */
/*
 * Return a pointer to the first item in the AIL.  If the AIL is empty, then
 * return NULL.
 */
static xfs_log_item_t *
xfs_ail_min(
	struct xfs_ail		*ailp)
{
	if (list_empty(&ailp->xa_ail))
		return NULL;

	return list_first_entry(&ailp->xa_ail, xfs_log_item_t, li_ail);
}
/*
 * Return a pointer to the last item in the AIL.  If the AIL is empty, then
 * return NULL.
 */
static xfs_log_item_t *
xfs_ail_max(
	struct xfs_ail		*ailp)
{
	if (list_empty(&ailp->xa_ail))
		return NULL;

	return list_entry(ailp->xa_ail.prev, xfs_log_item_t, li_ail);
}
/*
 * Return a pointer to the item which follows the given item in the AIL.  If
 * the given item is the last item in the list, then return NULL.
 */
static xfs_log_item_t *
xfs_ail_next(
	struct xfs_ail		*ailp,
	xfs_log_item_t		*lip)
{
	if (lip->li_ail.next == &ailp->xa_ail)
		return NULL;

	return list_first_entry(&lip->li_ail, xfs_log_item_t, li_ail);
}
/*
 * This is called by the log manager code to determine the LSN of the tail of
 * the log.  This is exactly the LSN of the first item in the AIL.  If the AIL
 * is empty, then this function returns 0.
 *
 * We need the AIL lock in order to get a coherent read of the lsn of the last
 * item in the AIL.
 */
xfs_lsn_t
xfs_ail_min_lsn(
	struct xfs_ail		*ailp)
{
	xfs_lsn_t		lsn = 0;
	xfs_log_item_t		*lip;

	spin_lock(&ailp->xa_lock);
	lip = xfs_ail_min(ailp);
	if (lip)
		lsn = lip->li_lsn;
	spin_unlock(&ailp->xa_lock);

	return lsn;
}
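/*
 * Illustrative sketch (hedged, not part of this file): the log manager
 * consumes the value returned above when recomputing the log tail, roughly:
 *
 *	xfs_lsn_t	tail_lsn;
 *
 *	tail_lsn = xfs_ail_min_lsn(mp->m_ail);
 *	if (!tail_lsn)
 *		tail_lsn = atomic64_read(&log->l_last_sync_lsn);
 *
 * See xlog_assign_tail_lsn() in xfs_log.c for the real consumer; the
 * l_last_sync_lsn fallback shown here is an assumption about that caller.
 */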
/*
 * Return the maximum lsn held in the AIL, or zero if the AIL is empty.
 */
static xfs_lsn_t
xfs_ail_max_lsn(
	struct xfs_ail		*ailp)
{
	xfs_lsn_t		lsn = 0;
	xfs_log_item_t		*lip;

	spin_lock(&ailp->xa_lock);
	lip = xfs_ail_max(ailp);
	if (lip)
		lsn = lip->li_lsn;
	spin_unlock(&ailp->xa_lock);

	return lsn;
}
/*
 * The cursor keeps track of where our current traversal is up to by tracking
 * the next item in the list for us.  However, for this to be safe, removing an
 * object from the AIL needs to invalidate any cursor that points to it.  Hence
 * the traversal cursor needs to be linked to the struct xfs_ail so that
 * deletion can search all the active cursors for invalidation.
 */
STATIC void
xfs_trans_ail_cursor_init(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur)
{
	cur->item = NULL;
	list_add_tail(&cur->list, &ailp->xa_cursors);
}
/*
 * Get the next item in the traversal and advance the cursor.  If the cursor
 * was invalidated (indicated by a lip with the low bit set), restart the
 * traversal from the start of the AIL.
 */
struct xfs_log_item *
xfs_trans_ail_cursor_next(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur)
{
	struct xfs_log_item	*lip = cur->item;

	if ((__psint_t)lip & 1)
		lip = xfs_ail_min(ailp);
	if (lip)
		cur->item = xfs_ail_next(ailp, lip);
	return lip;
}
/*
 * When the traversal is complete, we need to remove the cursor from the list
 * of traversing cursors.
 */
void
xfs_trans_ail_cursor_done(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur)
{
	cur->item = NULL;
	list_del_init(&cur->list);
}
/*
 * Invalidate any cursor that is pointing to this item.  This is called when an
 * item is removed from the AIL.  Any cursor pointing to this object is now
 * invalid and the traversal needs to be terminated so it doesn't reference a
 * freed object.  We set the low bit of the cursor item pointer so we can
 * distinguish between an invalidation and the end of the list when getting the
 * next item from the cursor.
 */
STATIC void
xfs_trans_ail_cursor_clear(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip)
{
	struct xfs_ail_cursor	*cur;

	list_for_each_entry(cur, &ailp->xa_cursors, list) {
		if (cur->item == lip)
			cur->item = (struct xfs_log_item *)
					((__psint_t)cur->item | 1);
	}
}
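/*
 * A minimal sketch of the pointer-tagging scheme above, assuming only that
 * log items are at least 2-byte aligned so the low pointer bit is free:
 *
 *	cur->item = (struct xfs_log_item *)((__psint_t)lip | 1);
 *
 *	// ...later, in the consumer (see xfs_trans_ail_cursor_next())...
 *	if ((__psint_t)cur->item & 1)
 *		// cursor was invalidated: restart from xfs_ail_min()
 *	else if (cur->item == NULL)
 *		// traversal reached the end of the AIL
 *
 * Storing the tag in the pointer itself means no separate "valid" flag is
 * needed in struct xfs_ail_cursor, and a NULL item still unambiguously
 * means end-of-list.
 */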
/*
 * Find the first item in the AIL with the given @lsn by searching in ascending
 * LSN order and initialise the cursor to point to the next item for an
 * ascending traversal.  Pass a @lsn of zero to initialise the cursor to the
 * first item in the AIL.  Returns NULL if the list is empty.
 */
xfs_log_item_t *
xfs_trans_ail_cursor_first(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	xfs_lsn_t		lsn)
{
	xfs_log_item_t		*lip;

	xfs_trans_ail_cursor_init(ailp, cur);

	if (lsn == 0) {
		lip = xfs_ail_min(ailp);
		goto out;
	}

	list_for_each_entry(lip, &ailp->xa_ail, li_ail) {
		if (XFS_LSN_CMP(lip->li_lsn, lsn) >= 0)
			goto out;
	}
	return NULL;

out:
	if (lip)
		cur->item = xfs_ail_next(ailp, lip);
	return lip;
}
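/*
 * A minimal traversal sketch using the cursor API (hedged example; this is
 * the same shape as the push loop in xfsaild_push() below):
 *
 *	struct xfs_ail_cursor	cur;
 *	struct xfs_log_item	*lip;
 *
 *	spin_lock(&ailp->xa_lock);
 *	lip = xfs_trans_ail_cursor_first(ailp, &cur, start_lsn);
 *	while (lip != NULL) {
 *		// work on lip; this may drop and retake xa_lock, which is
 *		// safe because deletions invalidate the cursor rather than
 *		// leaving it pointing at freed memory
 *		lip = xfs_trans_ail_cursor_next(ailp, &cur);
 *	}
 *	xfs_trans_ail_cursor_done(ailp, &cur);
 *	spin_unlock(&ailp->xa_lock);
 *
 * start_lsn is a placeholder name for whatever LSN the caller resumes from.
 */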
static struct xfs_log_item *
__xfs_trans_ail_cursor_last(
	struct xfs_ail		*ailp,
	xfs_lsn_t		lsn)
{
	xfs_log_item_t		*lip;

	list_for_each_entry_reverse(lip, &ailp->xa_ail, li_ail) {
		if (XFS_LSN_CMP(lip->li_lsn, lsn) <= 0)
			return lip;
	}
	return NULL;
}
/*
 * Find the last item in the AIL with the given @lsn by searching in descending
 * LSN order and initialise the cursor to point to that item.  If there is no
 * item with the value of @lsn, then it sets the cursor to the last item with
 * an LSN lower than @lsn.  Returns NULL if the list is empty.
 */
struct xfs_log_item *
xfs_trans_ail_cursor_last(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	xfs_lsn_t		lsn)
{
	xfs_trans_ail_cursor_init(ailp, cur);
	cur->item = __xfs_trans_ail_cursor_last(ailp, lsn);
	return cur->item;
}
/*
 * Splice the log item list into the AIL at the given LSN.  We splice to the
 * tail of the given LSN to maintain insert order for push traversals.  The
 * cursor is optional, allowing repeated updates to the same LSN to avoid
 * repeated traversals.  This should not be called with an empty list.
 */
static void
xfs_ail_splice(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	struct list_head	*list,
	xfs_lsn_t		lsn)
{
	struct xfs_log_item	*lip;

	ASSERT(!list_empty(list));

	/*
	 * Use the cursor to determine the insertion point if one is
	 * provided.  If not, or if the one we got is not valid,
	 * find the place in the AIL where the items belong.
	 */
	lip = cur ? cur->item : NULL;
	if (!lip || (__psint_t)lip & 1)
		lip = __xfs_trans_ail_cursor_last(ailp, lsn);

	/*
	 * If a cursor is provided, we know we're processing the AIL
	 * in lsn order, and future items to be spliced in will
	 * follow the last one being inserted now.  Update the
	 * cursor to point to that last item, now while we have a
	 * reliable pointer to it.
	 */
	if (cur)
		cur->item = list_entry(list->prev, struct xfs_log_item, li_ail);

	/*
	 * Finally perform the splice.  Unless the AIL was empty,
	 * lip points to the item in the AIL _after_ which the new
	 * items should go.  If lip is null the AIL was empty, so
	 * the new items go at the head of the AIL.
	 */
	if (lip)
		list_splice(list, &lip->li_ail);
	else
		list_splice(list, &ailp->xa_ail);
}
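/*
 * Worked example of the splice ordering (hedged): if the AIL holds items
 * with LSNs [10, 20, 20, 30] and we splice a list at lsn == 20,
 * __xfs_trans_ail_cursor_last() returns the second item at LSN 20, so the
 * result is [10, 20, 20, <new items>, 30].  Inserting after the last item
 * of equal LSN is what keeps items at the same LSN in insertion order for
 * ascending push traversals.
 */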
/*
 * Delete the given item from the AIL.
 */
static void
xfs_ail_delete(
	struct xfs_ail		*ailp,
	xfs_log_item_t		*lip)
{
	xfs_ail_check(ailp, lip);
	list_del(&lip->li_ail);
	xfs_trans_ail_cursor_clear(ailp, lip);
}
static long
xfsaild_push(
	struct xfs_ail		*ailp)
{
	xfs_mount_t		*mp = ailp->xa_mount;
	struct xfs_ail_cursor	cur;
	xfs_log_item_t		*lip;
	xfs_lsn_t		lsn;
	xfs_lsn_t		target;
	long			tout;
	int			stuck = 0;
	int			flushing = 0;
	int			count = 0;

	/*
	 * If we encountered pinned items or did not finish writing out all
	 * buffers the last time we ran, force the log first and wait for it
	 * before pushing again.
	 */
	if (ailp->xa_log_flush && ailp->xa_last_pushed_lsn == 0 &&
	    (!list_empty_careful(&ailp->xa_buf_list) ||
	     xfs_ail_min_lsn(ailp))) {
		ailp->xa_log_flush = 0;

		XFS_STATS_INC(xs_push_ail_flush);
		xfs_log_force(mp, XFS_LOG_SYNC);
	}

	spin_lock(&ailp->xa_lock);

	/* barrier matches the xa_target update in xfs_ail_push() */
	smp_rmb();
	target = ailp->xa_target;
	ailp->xa_target_prev = target;

	lip = xfs_trans_ail_cursor_first(ailp, &cur, ailp->xa_last_pushed_lsn);
	if (!lip) {
		/*
		 * If the AIL is empty or our push has reached the end we are
		 * done now.
		 */
		xfs_trans_ail_cursor_done(ailp, &cur);
		spin_unlock(&ailp->xa_lock);
		goto out_done;
	}

	XFS_STATS_INC(xs_push_ail);

	lsn = lip->li_lsn;
	while ((XFS_LSN_CMP(lip->li_lsn, target) <= 0)) {
		int	lock_result;

		/*
		 * Note that IOP_PUSH may unlock and reacquire the AIL lock.  We
		 * rely on the AIL cursor implementation to be able to deal with
		 * the dropped lock.
		 */
		lock_result = IOP_PUSH(lip, &ailp->xa_buf_list);
		switch (lock_result) {
		case XFS_ITEM_SUCCESS:
			XFS_STATS_INC(xs_push_ail_success);
			trace_xfs_ail_push(lip);

			ailp->xa_last_pushed_lsn = lsn;
			break;

		case XFS_ITEM_FLUSHING:
			/*
			 * The item or its backing buffer is already being
			 * flushed.  The typical reason for that is that an
			 * inode buffer is locked because we already pushed the
			 * updates to it as part of inode clustering.
			 *
			 * We do not want to stop flushing just because lots
			 * of items are already being flushed, but we need to
			 * re-try the flushing relatively soon if most of the
			 * AIL is being flushed.
			 */
			XFS_STATS_INC(xs_push_ail_flushing);
			trace_xfs_ail_flushing(lip);

			flushing++;
			ailp->xa_last_pushed_lsn = lsn;
			break;

		case XFS_ITEM_PINNED:
			XFS_STATS_INC(xs_push_ail_pinned);
			trace_xfs_ail_pinned(lip);

			stuck++;
			ailp->xa_log_flush++;
			break;

		case XFS_ITEM_LOCKED:
			XFS_STATS_INC(xs_push_ail_locked);
			trace_xfs_ail_locked(lip);

			stuck++;
			break;

		default:
			ASSERT(0);
			break;
		}

		count++;

		/*
		 * Are there too many items we can't do anything with?
		 *
		 * If we are skipping too many items because we can't flush
		 * them or they are already being flushed, we back off and
		 * give them time to complete whatever operation is being
		 * done.  i.e. remove pressure from the AIL while we can't make
		 * progress so traversals don't slow down further inserts and
		 * removals to/from the AIL.
		 *
		 * The value of 100 is an arbitrary magic number based on
		 * observation.
		 */
		if (stuck > 100)
			break;

		lip = xfs_trans_ail_cursor_next(ailp, &cur);
		if (lip == NULL)
			break;
		lsn = lip->li_lsn;
	}
	xfs_trans_ail_cursor_done(ailp, &cur);
	spin_unlock(&ailp->xa_lock);

	if (xfs_buf_delwri_submit_nowait(&ailp->xa_buf_list))
		ailp->xa_log_flush++;

	if (!count || XFS_LSN_CMP(lsn, target) >= 0) {
out_done:
		/*
		 * We reached the target or the AIL is empty, so wait a bit
		 * longer for I/O to complete and remove pushed items from the
		 * AIL before we start the next scan from the start of the AIL.
		 */
		tout = 50;
		ailp->xa_last_pushed_lsn = 0;
	} else if (((stuck + flushing) * 100) / count > 90) {
		/*
		 * Either there is a lot of contention on the AIL or we are
		 * stuck due to operations in progress.  "Stuck" in this case
		 * is defined as >90% of the items we tried to push were stuck.
		 *
		 * Backoff a bit more to allow some I/O to complete before
		 * restarting from the start of the AIL.  This prevents us from
		 * spinning on the same items, and if they are pinned will
		 * allow the restart to issue a log force to unpin the stuck
		 * items.
		 */
		tout = 20;
		ailp->xa_last_pushed_lsn = 0;
	} else {
		/*
		 * Assume we have more work to do in a short while.
		 */
		tout = 10;
	}

	return tout;
}
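/*
 * Worked example of the backoff heuristic above (hedged): suppose a scan
 * visits count = 100 items and finds stuck = 60 pinned/locked items plus
 * flushing = 35 already under I/O.  Then (60 + 35) * 100 / 100 = 95 > 90,
 * so we return a 20ms timeout and restart from the start of the AIL rather
 * than spinning on the same unpushable items.
 */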
static int
xfsaild(
	void			*data)
{
	struct xfs_ail		*ailp = data;
	long			tout = 0;	/* milliseconds */

	current->flags |= PF_MEMALLOC;

	while (!kthread_should_stop()) {
		if (tout && tout <= 20)
			__set_current_state(TASK_KILLABLE);
		else
			__set_current_state(TASK_INTERRUPTIBLE);

		spin_lock(&ailp->xa_lock);

		/*
		 * Idle if the AIL is empty and we are not racing with a target
		 * update.  We check the AIL after we set the task to a sleep
		 * state to guarantee that we either catch an xa_target update
		 * or that a wake_up resets the state to TASK_RUNNING.
		 * Otherwise, we run the risk of sleeping indefinitely.
		 *
		 * The barrier matches the xa_target update in xfs_ail_push().
		 */
		smp_rmb();
		if (!xfs_ail_min(ailp) &&
		    ailp->xa_target == ailp->xa_target_prev) {
			spin_unlock(&ailp->xa_lock);
			schedule();
			tout = 0;
			continue;
		}
		spin_unlock(&ailp->xa_lock);

		if (tout)
			schedule_timeout(msecs_to_jiffies(tout));

		__set_current_state(TASK_RUNNING);

		try_to_freeze();

		tout = xfsaild_push(ailp);
	}

	return 0;
}
/*
 * This routine is called to move the tail of the AIL forward.  It does this by
 * trying to flush items in the AIL whose lsns are below the given
 * threshold_lsn.
 *
 * The push is run asynchronously in a separate thread, which means the caller
 * needs to handle waiting on the async flush for space to become available.
 * We don't want to interrupt any push that is in progress, hence we only queue
 * work if we set the pushing bit appropriately.
 *
 * We do this unlocked - we only need to know whether there is anything in the
 * AIL at the time we are called.  We don't need to access the contents of
 * any of the objects, so the lock is not needed.
 */
void
xfs_ail_push(
	struct xfs_ail	*ailp,
	xfs_lsn_t	threshold_lsn)
{
	xfs_log_item_t	*lip;

	lip = xfs_ail_min(ailp);
	if (!lip || XFS_FORCED_SHUTDOWN(ailp->xa_mount) ||
	    XFS_LSN_CMP(threshold_lsn, ailp->xa_target) <= 0)
		return;

	/*
	 * Ensure that the new target is noticed in push code before it idles;
	 * these barriers pair with the smp_rmb() calls in xfsaild() and
	 * xfsaild_push().
	 */
	smp_wmb();
	xfs_trans_ail_copy_lsn(ailp, &ailp->xa_target, &threshold_lsn);
	smp_wmb();

	wake_up_process(ailp->xa_task);
}
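/*
 * Hedged usage sketch: log space reservation pushes the AIL up to a
 * threshold LSN when the log is getting full, roughly:
 *
 *	threshold_lsn = ...;	// computed from the remaining log space
 *	xfs_ail_push(ailp, threshold_lsn);
 *
 * See xlog_grant_push_ail() in xfs_log.c for the real caller; the threshold
 * computation is elided here.
 */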
/*
 * Push out all items in the AIL immediately.
 */
void
xfs_ail_push_all(
	struct xfs_ail	*ailp)
{
	xfs_lsn_t	threshold_lsn = xfs_ail_max_lsn(ailp);

	if (threshold_lsn)
		xfs_ail_push(ailp, threshold_lsn);
}
/*
 * Push out all items in the AIL immediately and wait until the AIL is empty.
 */
void
xfs_ail_push_all_sync(
	struct xfs_ail		*ailp)
{
	struct xfs_log_item	*lip;
	DEFINE_WAIT(wait);

	spin_lock(&ailp->xa_lock);
	while ((lip = xfs_ail_max(ailp)) != NULL) {
		prepare_to_wait(&ailp->xa_empty, &wait, TASK_UNINTERRUPTIBLE);
		ailp->xa_target = lip->li_lsn;
		wake_up_process(ailp->xa_task);
		spin_unlock(&ailp->xa_lock);
		schedule();
		spin_lock(&ailp->xa_lock);
	}
	spin_unlock(&ailp->xa_lock);

	finish_wait(&ailp->xa_empty, &wait);
}
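/*
 * The loop above is the standard prepare_to_wait() pattern: queue the waiter
 * and set the sleep state while still holding xa_lock, then drop the lock
 * and schedule().  A hedged sketch of the matching wakeup side, which
 * xfs_trans_ail_delete_bulk() below performs:
 *
 *	if (list_empty(&ailp->xa_ail))
 *		wake_up_all(&ailp->xa_empty);
 *
 * Because the waiter is queued before xa_lock is dropped, a deletion that
 * empties the AIL cannot slip between the emptiness check and the sleep.
 */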
/*
 * xfs_trans_ail_update - bulk AIL insertion operation.
 *
 * @xfs_trans_ail_update takes an array of log items that all need to be
 * positioned at the same LSN in the AIL.  If an item is not in the AIL, it
 * will be added.  Otherwise, it will be repositioned by removing it and
 * re-adding it to the AIL.  If we move the first item in the AIL, update the
 * log tail to match the new minimum LSN in the AIL.
 *
 * This function uses the AIL lock once for the entire update operation on all
 * the items in the array, rather than taking and dropping it per item.  Once
 * we have the AIL lock, we need to check each log item LSN to confirm it
 * needs to be moved forward in the AIL.
 *
 * To optimise the insert operation, we delete all the items from the AIL in
 * the first pass, moving them into a temporary list, then splice the temporary
 * list into the correct position in the AIL.  This avoids needing to do an
 * insert operation on every item.
 *
 * This function must be called with the AIL lock held.  The lock is dropped
 * before returning.
 */
void
xfs_trans_ail_update_bulk(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	struct xfs_log_item	**log_items,
	int			nr_items,
	xfs_lsn_t		lsn) __releases(ailp->xa_lock)
{
	xfs_log_item_t		*mlip;
	int			mlip_changed = 0;
	int			i;
	LIST_HEAD(tmp);

	ASSERT(nr_items > 0);		/* Not required, but true. */
	mlip = xfs_ail_min(ailp);

	for (i = 0; i < nr_items; i++) {
		struct xfs_log_item *lip = log_items[i];
		if (lip->li_flags & XFS_LI_IN_AIL) {
			/* check if we really need to move the item */
			if (XFS_LSN_CMP(lsn, lip->li_lsn) <= 0)
				continue;

			xfs_ail_delete(ailp, lip);
			if (mlip == lip)
				mlip_changed = 1;
		} else {
			lip->li_flags |= XFS_LI_IN_AIL;
		}
		lip->li_lsn = lsn;
		list_add(&lip->li_ail, &tmp);
	}

	if (!list_empty(&tmp))
		xfs_ail_splice(ailp, cur, &tmp, lsn);

	if (mlip_changed) {
		if (!XFS_FORCED_SHUTDOWN(ailp->xa_mount))
			xlog_assign_tail_lsn_locked(ailp->xa_mount);
		spin_unlock(&ailp->xa_lock);

		xfs_log_space_wake(ailp->xa_mount);
	} else {
		spin_unlock(&ailp->xa_lock);
	}
}
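/*
 * Hedged usage sketch: transaction commit completion repositions a whole
 * checkpoint's items at the commit record's LSN in a single call, e.g.:
 *
 *	spin_lock(&ailp->xa_lock);
 *	xfs_trans_ail_update_bulk(ailp, &cur, log_items, nr_items, commit_lsn);
 *	// xa_lock has been dropped for us on return
 *
 * log_items, nr_items and commit_lsn are placeholder names here; see
 * xfs_trans_committed_bulk() for the real caller.
 */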
/*
 * xfs_trans_ail_delete_bulk - remove multiple log items from the AIL
 *
 * @xfs_trans_ail_delete_bulk takes an array of log items that all need to be
 * removed from the AIL.  The caller is already holding the AIL lock, and has
 * done all the checks necessary to ensure the items passed in via @log_items
 * are ready for deletion.  This includes checking that the items are in the
 * AIL.
 *
 * For each log item to be removed, unlink it from the AIL, clear the IN_AIL
 * flag from the item and reset the item's lsn to 0.  If we remove the first
 * item in the AIL, update the log tail to match the new minimum LSN in the
 * AIL.
 *
 * This function will not drop the AIL lock until all items are removed from
 * the AIL to minimise the amount of lock traffic on the AIL.  This does not
 * greatly increase the AIL hold time, but does significantly reduce the amount
 * of traffic on the lock, especially during IO completion.
 *
 * This function must be called with the AIL lock held.  The lock is dropped
 * before returning.
 */
void
xfs_trans_ail_delete_bulk(
	struct xfs_ail		*ailp,
	struct xfs_log_item	**log_items,
	int			nr_items,
	int			shutdown_type) __releases(ailp->xa_lock)
{
	xfs_log_item_t		*mlip;
	int			mlip_changed = 0;
	int			i;

	mlip = xfs_ail_min(ailp);

	for (i = 0; i < nr_items; i++) {
		struct xfs_log_item *lip = log_items[i];
		if (!(lip->li_flags & XFS_LI_IN_AIL)) {
			struct xfs_mount	*mp = ailp->xa_mount;

			spin_unlock(&ailp->xa_lock);
			if (!XFS_FORCED_SHUTDOWN(mp)) {
				xfs_alert_tag(mp, XFS_PTAG_AILDELETE,
		"%s: attempting to delete a log item that is not in the AIL",
						__func__);
				xfs_force_shutdown(mp, shutdown_type);
			}
			return;
		}

		xfs_ail_delete(ailp, lip);
		lip->li_flags &= ~XFS_LI_IN_AIL;
		lip->li_lsn = 0;
		if (mlip == lip)
			mlip_changed = 1;
	}

	if (mlip_changed) {
		if (!XFS_FORCED_SHUTDOWN(ailp->xa_mount))
			xlog_assign_tail_lsn_locked(ailp->xa_mount);
		if (list_empty(&ailp->xa_ail))
			wake_up_all(&ailp->xa_empty);
		spin_unlock(&ailp->xa_lock);

		xfs_log_space_wake(ailp->xa_mount);
	} else {
		spin_unlock(&ailp->xa_lock);
	}
}
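/*
 * Hedged usage sketch: inode buffer I/O completion removes a whole cluster's
 * worth of inode log items in one call to amortise the lock traffic, e.g.:
 *
 *	spin_lock(&ailp->xa_lock);
 *	xfs_trans_ail_delete_bulk(ailp, log_items, nr_items,
 *				  SHUTDOWN_CORRUPT_INCORE);
 *	// xa_lock has been dropped for us on return
 *
 * log_items and nr_items are placeholder names; xfs_iflush_done() in
 * xfs_inode_item.c is one real bulk caller.
 */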
int
xfs_trans_ail_init(
	xfs_mount_t	*mp)
{
	struct xfs_ail	*ailp;

	ailp = kmem_zalloc(sizeof(struct xfs_ail), KM_MAYFAIL);
	if (!ailp)
		return ENOMEM;

	ailp->xa_mount = mp;
	INIT_LIST_HEAD(&ailp->xa_ail);
	INIT_LIST_HEAD(&ailp->xa_cursors);
	spin_lock_init(&ailp->xa_lock);
	INIT_LIST_HEAD(&ailp->xa_buf_list);
	init_waitqueue_head(&ailp->xa_empty);

	ailp->xa_task = kthread_run(xfsaild, ailp, "xfsaild/%s",
			ailp->xa_mount->m_fsname);
	if (IS_ERR(ailp->xa_task))
		goto out_free_ailp;

	mp->m_ail = ailp;
	return 0;

out_free_ailp:
	kmem_free(ailp);
	return ENOMEM;
}

void
xfs_trans_ail_destroy(
	xfs_mount_t	*mp)
{
	struct xfs_ail	*ailp = mp->m_ail;

	kthread_stop(ailp->xa_task);
	kmem_free(ailp);
}