fs/xfs/xfs_trans_ail.c
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * Copyright (c) 2008 Dave Chinner
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_trans_priv.h"
#include "xfs_error.h"
#ifdef DEBUG
/*
 * Check that the list is sorted as it should be.
 */
STATIC void
xfs_ail_check(
        struct xfs_ail  *ailp,
        xfs_log_item_t  *lip)
{
        xfs_log_item_t  *prev_lip;

        if (list_empty(&ailp->xa_ail))
                return;

        /*
         * Check the next and previous entries are valid.
         */
        ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
        prev_lip = list_entry(lip->li_ail.prev, xfs_log_item_t, li_ail);
        if (&prev_lip->li_ail != &ailp->xa_ail)
                ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);

        prev_lip = list_entry(lip->li_ail.next, xfs_log_item_t, li_ail);
        if (&prev_lip->li_ail != &ailp->xa_ail)
                ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) >= 0);

#ifdef XFS_TRANS_DEBUG
        /*
         * Walk the list checking lsn ordering, and that every entry has the
         * XFS_LI_IN_AIL flag set. This is really expensive, so only do it
         * when specifically debugging the transaction subsystem.
         */
        prev_lip = list_entry(&ailp->xa_ail, xfs_log_item_t, li_ail);
        list_for_each_entry(lip, &ailp->xa_ail, li_ail) {
                if (&prev_lip->li_ail != &ailp->xa_ail)
                        ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);
                ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
                prev_lip = lip;
        }
#endif /* XFS_TRANS_DEBUG */
}
#else /* !DEBUG */
#define xfs_ail_check(a,l)
#endif /* DEBUG */
/*
 * Return a pointer to the first item in the AIL.  If the AIL is empty, then
 * return NULL.
 */
static xfs_log_item_t *
xfs_ail_min(
        struct xfs_ail  *ailp)
{
        if (list_empty(&ailp->xa_ail))
                return NULL;

        return list_first_entry(&ailp->xa_ail, xfs_log_item_t, li_ail);
}

/*
 * Return a pointer to the last item in the AIL.  If the AIL is empty, then
 * return NULL.
 */
static xfs_log_item_t *
xfs_ail_max(
        struct xfs_ail  *ailp)
{
        if (list_empty(&ailp->xa_ail))
                return NULL;

        return list_entry(ailp->xa_ail.prev, xfs_log_item_t, li_ail);
}

/*
 * Return a pointer to the item which follows the given item in the AIL.  If
 * the given item is the last item in the list, then return NULL.
 */
static xfs_log_item_t *
xfs_ail_next(
        struct xfs_ail  *ailp,
        xfs_log_item_t  *lip)
{
        if (lip->li_ail.next == &ailp->xa_ail)
                return NULL;

        return list_first_entry(&lip->li_ail, xfs_log_item_t, li_ail);
}
/*
 * This is called by the log manager code to determine the LSN of the tail of
 * the log.  This is exactly the LSN of the first item in the AIL.  If the AIL
 * is empty, then this function returns 0.
 *
 * We need the AIL lock in order to get a coherent read of the lsn of the
 * first item in the AIL.
 */
xfs_lsn_t
xfs_ail_min_lsn(
        struct xfs_ail  *ailp)
{
        xfs_lsn_t       lsn = 0;
        xfs_log_item_t  *lip;

        spin_lock(&ailp->xa_lock);
        lip = xfs_ail_min(ailp);
        if (lip)
                lsn = lip->li_lsn;
        spin_unlock(&ailp->xa_lock);

        return lsn;
}

/*
 * Return the maximum lsn held in the AIL, or zero if the AIL is empty.
 */
static xfs_lsn_t
xfs_ail_max_lsn(
        struct xfs_ail  *ailp)
{
        xfs_lsn_t       lsn = 0;
        xfs_log_item_t  *lip;

        spin_lock(&ailp->xa_lock);
        lip = xfs_ail_max(ailp);
        if (lip)
                lsn = lip->li_lsn;
        spin_unlock(&ailp->xa_lock);

        return lsn;
}
/*
 * AIL traversal cursor initialisation.
 *
 * The cursor keeps track of where our current traversal is up
 * to by tracking the next item in the list for us. However, for
 * this to be safe, removing an object from the AIL needs to invalidate
 * any cursor that points to it. Hence the traversal cursor needs to
 * be linked to the struct xfs_ail so that deletion can search all the
 * active cursors for invalidation.
 *
 * We don't link the push cursor because it is embedded in the struct
 * xfs_ail and hence easily findable.
 */
STATIC void
xfs_trans_ail_cursor_init(
        struct xfs_ail          *ailp,
        struct xfs_ail_cursor   *cur)
{
        cur->item = NULL;
        if (cur == &ailp->xa_cursors)
                return;

        cur->next = ailp->xa_cursors.next;
        ailp->xa_cursors.next = cur;
}

/*
 * Set the cursor to the next item, because when we look
 * up the cursor the current item may have been freed.
 */
STATIC void
xfs_trans_ail_cursor_set(
        struct xfs_ail          *ailp,
        struct xfs_ail_cursor   *cur,
        struct xfs_log_item     *lip)
{
        if (lip)
                cur->item = xfs_ail_next(ailp, lip);
}

/*
 * Get the next item in the traversal and advance the cursor.
 * If the cursor was invalidated (indicated by a lip of 1),
 * restart the traversal.
 */
struct xfs_log_item *
xfs_trans_ail_cursor_next(
        struct xfs_ail          *ailp,
        struct xfs_ail_cursor   *cur)
{
        struct xfs_log_item     *lip = cur->item;

        if ((__psint_t)lip & 1)
                lip = xfs_ail_min(ailp);
        xfs_trans_ail_cursor_set(ailp, cur, lip);
        return lip;
}
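
/*
 * Editorial sketch (not part of the original code): the cursor API above is
 * intended to be used as a first/next/done loop under the AIL lock, in the
 * style of xfsaild_push() below.  A hypothetical traversal looks like:
 *
 *	struct xfs_ail_cursor	cur;
 *	struct xfs_log_item	*lip;
 *
 *	spin_lock(&ailp->xa_lock);
 *	lip = xfs_trans_ail_cursor_first(ailp, &cur, lsn);
 *	while (lip != NULL) {
 *		// process lip; the AIL lock may be dropped and retaken
 *		// here because the cursor survives list modifications
 *		lip = xfs_trans_ail_cursor_next(ailp, &cur);
 *	}
 *	xfs_trans_ail_cursor_done(ailp, &cur);
 *	spin_unlock(&ailp->xa_lock);
 */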
/*
 * Now that the traversal is complete, we need to remove the cursor
 * from the list of traversing cursors. Avoid removing the embedded
 * push cursor, but use the fact it is always present to make the
 * list deletion simple.
 */
void
xfs_trans_ail_cursor_done(
        struct xfs_ail          *ailp,
        struct xfs_ail_cursor   *done)
{
        struct xfs_ail_cursor   *prev = NULL;
        struct xfs_ail_cursor   *cur;

        done->item = NULL;
        if (done == &ailp->xa_cursors)
                return;
        prev = &ailp->xa_cursors;
        for (cur = prev->next; cur; prev = cur, cur = prev->next) {
                if (cur == done) {
                        prev->next = cur->next;
                        break;
                }
        }
        ASSERT(cur);
}

/*
 * Invalidate any cursor that is pointing to this item. This is
 * called when an item is removed from the AIL. Any cursor pointing
 * to this object is now invalid and the traversal needs to be
 * terminated so it doesn't reference a freed object. We set the
 * cursor item to a value of 1 so we can distinguish between an
 * invalidation and the end of the list when getting the next item
 * from the cursor.
 */
STATIC void
xfs_trans_ail_cursor_clear(
        struct xfs_ail          *ailp,
        struct xfs_log_item     *lip)
{
        struct xfs_ail_cursor   *cur;

        /* need to search all cursors */
        for (cur = &ailp->xa_cursors; cur; cur = cur->next) {
                if (cur->item == lip)
                        cur->item = (struct xfs_log_item *)
                                        ((__psint_t)cur->item | 1);
        }
}
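
/*
 * Editorial note: the invalidation above relies on log item pointers being
 * at least 2-byte aligned, so bit 0 of a valid pointer is always zero.
 * Tagging the stale cursor with that bit:
 *
 *	cur->item = (struct xfs_log_item *)((__psint_t)cur->item | 1);
 *
 * lets xfs_trans_ail_cursor_next() test (__psint_t)lip & 1 to distinguish
 * "cursor invalidated, restart from xfs_ail_min()" from a NULL end-of-list.
 */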
/*
 * Initialise the cursor to the first item in the AIL with the given @lsn.
 * This searches the list from lowest LSN to highest. Pass a @lsn of zero
 * to initialise the cursor to the first item in the AIL.
 */
xfs_log_item_t *
xfs_trans_ail_cursor_first(
        struct xfs_ail          *ailp,
        struct xfs_ail_cursor   *cur,
        xfs_lsn_t               lsn)
{
        xfs_log_item_t          *lip;

        xfs_trans_ail_cursor_init(ailp, cur);
        lip = xfs_ail_min(ailp);
        if (lsn == 0)
                goto out;

        list_for_each_entry(lip, &ailp->xa_ail, li_ail) {
                if (XFS_LSN_CMP(lip->li_lsn, lsn) >= 0)
                        goto out;
        }
        lip = NULL;
out:
        xfs_trans_ail_cursor_set(ailp, cur, lip);
        return lip;
}

/*
 * Return the last item in the AIL with the given @lsn.
 * This searches the list from highest LSN to lowest. If there is no item
 * with the value of @lsn, then it returns the last item with an LSN lower
 * than @lsn.
 */
static struct xfs_log_item *
__xfs_trans_ail_cursor_last(
        struct xfs_ail          *ailp,
        xfs_lsn_t               lsn)
{
        xfs_log_item_t          *lip;

        list_for_each_entry_reverse(lip, &ailp->xa_ail, li_ail) {
                if (XFS_LSN_CMP(lip->li_lsn, lsn) <= 0)
                        return lip;
        }
        return NULL;
}

/*
 * Initialise the cursor to the last item in the AIL with the given @lsn.
 * This searches the list from highest LSN to lowest.
 */
struct xfs_log_item *
xfs_trans_ail_cursor_last(
        struct xfs_ail          *ailp,
        struct xfs_ail_cursor   *cur,
        xfs_lsn_t               lsn)
{
        xfs_trans_ail_cursor_init(ailp, cur);
        cur->item = __xfs_trans_ail_cursor_last(ailp, lsn);
        return cur->item;
}
/*
 * Splice the log item list into the AIL at the given LSN. We splice to the
 * tail of the given LSN to maintain insert order for push traversals. The
 * cursor is optional, allowing repeated updates to the same LSN to avoid
 * repeated traversals.
 */
static void
xfs_ail_splice(
        struct xfs_ail          *ailp,
        struct xfs_ail_cursor   *cur,
        struct list_head        *list,
        xfs_lsn_t               lsn)
{
        struct xfs_log_item     *lip = cur ? cur->item : NULL;
        struct xfs_log_item     *next_lip;

        /*
         * Get a new cursor if we don't have a placeholder or the existing one
         * has been invalidated.
         */
        if (!lip || (__psint_t)lip & 1) {
                lip = __xfs_trans_ail_cursor_last(ailp, lsn);

                if (!lip) {
                        /* The list is empty, so just splice and return. */
                        if (cur)
                                cur->item = NULL;
                        list_splice(list, &ailp->xa_ail);
                        return;
                }
        }

        /*
         * Our cursor points to the item we want to insert _after_, so we have
         * to update the cursor to point to the end of the list we are splicing
         * in so that it points to the correct location for the next splice.
         * i.e. before the splice
         *
         *      lsn -> lsn -> lsn + x -> lsn + x ...
         *           ^
         *           | cursor points here
         *
         * After the splice we have:
         *
         *      lsn -> lsn -> lsn -> lsn -> .... -> lsn -> lsn + x -> lsn + x ...
         *           ^                            ^
         *           | cursor points here         | needs to move here
         *
         * So we set the cursor to the last item in the list to be spliced
         * before we execute the splice, resulting in the cursor pointing to
         * the correct item after the splice occurs.
         */
        if (cur) {
                next_lip = list_entry(list->prev, struct xfs_log_item, li_ail);
                cur->item = next_lip;
        }
        list_splice(list, &lip->li_ail);
}
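
/*
 * Editorial note: splicing after the _last_ item with the target LSN (rather
 * than the first) keeps items with equal LSNs in insertion order.  For
 * example, splicing new items at LSN 10 into the list 5 -> 10 -> 10 -> 20
 * yields 5 -> 10 -> 10 -> <new items> -> 20, so a push traversal still sees
 * items in the order they were inserted.
 */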
/*
 * Delete the given item from the AIL. The item must already be in the AIL.
 */
static void
xfs_ail_delete(
        struct xfs_ail  *ailp,
        xfs_log_item_t  *lip)
{
        xfs_ail_check(ailp, lip);
        list_del(&lip->li_ail);
        xfs_trans_ail_cursor_clear(ailp, lip);
}
static long
xfsaild_push(
        struct xfs_ail          *ailp)
{
        xfs_mount_t             *mp = ailp->xa_mount;
        struct xfs_ail_cursor   *cur = &ailp->xa_cursors;
        xfs_log_item_t          *lip;
        xfs_lsn_t               lsn;
        xfs_lsn_t               target;
        long                    tout = 10;
        int                     flush_log = 0;
        int                     stuck = 0;
        int                     count = 0;
        int                     push_xfsbufd = 0;

        spin_lock(&ailp->xa_lock);
        target = ailp->xa_target;
        xfs_trans_ail_cursor_init(ailp, cur);
        lip = xfs_trans_ail_cursor_first(ailp, cur, ailp->xa_last_pushed_lsn);
        if (!lip || XFS_FORCED_SHUTDOWN(mp)) {
                /*
                 * AIL is empty or our push has reached the end.
                 */
                xfs_trans_ail_cursor_done(ailp, cur);
                spin_unlock(&ailp->xa_lock);
                goto out_done;
        }

        XFS_STATS_INC(xs_push_ail);

        /*
         * While the item we are looking at is below the given threshold
         * try to flush it out. We'd like not to stop until we've at least
         * tried to push on everything in the AIL with an LSN less than
         * the given threshold.
         *
         * However, we will stop after a certain number of pushes and wait
         * for a reduced timeout to fire before pushing further. This
         * prevents us from spinning when we can't do anything or there is
         * lots of contention on the AIL lists.
         */
        lsn = lip->li_lsn;
        while ((XFS_LSN_CMP(lip->li_lsn, target) <= 0)) {
                int     lock_result;
                /*
                 * If we can lock the item without sleeping, unlock the AIL
                 * lock and flush the item.  Then re-grab the AIL lock so we
                 * can look for the next item on the AIL. List changes are
                 * handled by the AIL lookup functions internally.
                 *
                 * If we can't lock the item, either its holder will flush it
                 * or it is already being flushed or it is being relogged.  In
                 * any of these cases it is being taken care of and we can just
                 * skip to the next item in the list.
                 */
                lock_result = IOP_TRYLOCK(lip);
                spin_unlock(&ailp->xa_lock);
                switch (lock_result) {
                case XFS_ITEM_SUCCESS:
                        XFS_STATS_INC(xs_push_ail_success);
                        IOP_PUSH(lip);
                        ailp->xa_last_pushed_lsn = lsn;
                        break;

                case XFS_ITEM_PUSHBUF:
                        XFS_STATS_INC(xs_push_ail_pushbuf);

                        if (!IOP_PUSHBUF(lip)) {
                                stuck++;
                                flush_log = 1;
                        } else {
                                ailp->xa_last_pushed_lsn = lsn;
                        }
                        push_xfsbufd = 1;
                        break;

                case XFS_ITEM_PINNED:
                        XFS_STATS_INC(xs_push_ail_pinned);
                        stuck++;
                        flush_log = 1;
                        break;

                case XFS_ITEM_LOCKED:
                        XFS_STATS_INC(xs_push_ail_locked);
                        stuck++;
                        break;

                default:
                        ASSERT(0);
                        break;
                }

                spin_lock(&ailp->xa_lock);
                /* should we bother continuing? */
                if (XFS_FORCED_SHUTDOWN(mp))
                        break;
                ASSERT(mp->m_log);

                count++;

                /*
                 * Are there too many items we can't do anything with?
                 * If we are skipping too many items because we can't flush
                 * them or they are already being flushed, we back off and
                 * give them time to complete whatever operation is being
                 * done. i.e. remove pressure from the AIL while we can't make
                 * progress so traversals don't slow down further inserts and
                 * removals to/from the AIL.
                 *
                 * The value of 100 is an arbitrary magic number based on
                 * observation.
                 */
                if (stuck > 100)
                        break;

                lip = xfs_trans_ail_cursor_next(ailp, cur);
                if (lip == NULL)
                        break;
                lsn = lip->li_lsn;
        }
        xfs_trans_ail_cursor_done(ailp, cur);
        spin_unlock(&ailp->xa_lock);

        if (flush_log) {
                /*
                 * If something we need to push out was pinned, then
                 * push out the log so it will become unpinned and
                 * move forward in the AIL.
                 */
                XFS_STATS_INC(xs_push_ail_flush);
                xfs_log_force(mp, 0);
        }

        if (push_xfsbufd) {
                /* we've got delayed write buffers to flush */
                wake_up_process(mp->m_ddev_targp->bt_task);
        }

        /* assume we have more work to do in a short while */
out_done:
        if (!count) {
                /* We're past our target or empty, so idle */
                ailp->xa_last_pushed_lsn = 0;

                tout = 50;
        } else if (XFS_LSN_CMP(lsn, target) >= 0) {
                /*
                 * We reached the target so wait a bit longer for I/O to
                 * complete and remove pushed items from the AIL before we
                 * start the next scan from the start of the AIL.
                 */
                tout = 50;
                ailp->xa_last_pushed_lsn = 0;
        } else if ((stuck * 100) / count > 90) {
                /*
                 * Either there is a lot of contention on the AIL or we
                 * are stuck due to operations in progress. "Stuck" in this
                 * case is defined as >90% of the items we tried to push
                 * were stuck.
                 *
                 * Backoff a bit more to allow some I/O to complete before
                 * continuing from where we were.
                 */
                tout = 20;
        }

        return tout;
}
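
/*
 * Editorial summary of the timeout returned above (the values come from the
 * code; the interpretation is editorial):
 *
 *	tout == 50ms	nothing was pushed or the target was reached - idle
 *			longer and restart the next scan from the AIL head.
 *	tout == 20ms	>90% of pushed items were stuck - back off and let
 *			in-progress I/O complete.
 *	tout == 10ms	default - more work is expected shortly.
 */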
static int
xfsaild(
        void    *data)
{
        struct xfs_ail  *ailp = data;
        long            tout = 0;       /* milliseconds */

        while (!kthread_should_stop()) {
                if (tout && tout <= 20)
                        __set_current_state(TASK_KILLABLE);
                else
                        __set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(tout ?
                                 msecs_to_jiffies(tout) : MAX_SCHEDULE_TIMEOUT);

                try_to_freeze();

                tout = xfsaild_push(ailp);
        }

        return 0;
}
/*
 * This routine is called to move the tail of the AIL forward.  It does this by
 * trying to flush items in the AIL whose lsns are below the given
 * threshold_lsn.
 *
 * The push is run asynchronously by the xfsaild thread, which means the
 * caller needs to handle waiting on the async flush for space to become
 * available.  We don't want to interrupt a push that is in progress, hence we
 * only wake the thread when we raise the push target.
 *
 * We do this unlocked - we only need to know whether there is anything in the
 * AIL at the time we are called.  We don't need to access the contents of
 * any of the objects, so the lock is not needed.
 */
void
xfs_ail_push(
        struct xfs_ail  *ailp,
        xfs_lsn_t       threshold_lsn)
{
        xfs_log_item_t  *lip;

        lip = xfs_ail_min(ailp);
        if (!lip || XFS_FORCED_SHUTDOWN(ailp->xa_mount) ||
            XFS_LSN_CMP(threshold_lsn, ailp->xa_target) <= 0)
                return;

        /*
         * Ensure that the new target is noticed by the push code before
         * the xfsaild thread is woken.
         */
        smp_wmb();
        xfs_trans_ail_copy_lsn(ailp, &ailp->xa_target, &threshold_lsn);
        smp_wmb();

        wake_up_process(ailp->xa_task);
}

/*
 * Push out all items in the AIL immediately.
 */
void
xfs_ail_push_all(
        struct xfs_ail  *ailp)
{
        xfs_lsn_t       threshold_lsn = xfs_ail_max_lsn(ailp);

        if (threshold_lsn)
                xfs_ail_push(ailp, threshold_lsn);
}
/*
 * This is to be called when an item is unlocked that may have
 * been in the AIL.  It will wake up the first member of the AIL
 * wait list if this item's unlocking might allow it to progress.
 * If the item is in the AIL, then we need to get the AIL lock
 * while doing our checking so we don't race with someone going
 * to sleep waiting for this event in xfs_trans_push_ail().
 */
void
xfs_trans_unlocked_item(
        struct xfs_ail  *ailp,
        xfs_log_item_t  *lip)
{
        xfs_log_item_t  *min_lip;

        /*
         * If we're forcibly shutting down, we may have
         * unlocked log items arbitrarily. The last thing
         * we want to do is to move the tail of the log
         * over some potentially valid data.
         */
        if (!(lip->li_flags & XFS_LI_IN_AIL) ||
            XFS_FORCED_SHUTDOWN(ailp->xa_mount)) {
                return;
        }

        /*
         * This is the one case where we can call into xfs_ail_min()
         * without holding the AIL lock because we only care about the
         * case where we are at the tail of the AIL.  If the object isn't
         * at the tail, it doesn't matter what result we get back.  This
         * is slightly racy because since we were just unlocked, we could
         * go to sleep between the call to xfs_ail_min and the call to
         * xfs_log_move_tail, have someone else lock us, commit us to disk,
         * move us out of the tail of the AIL, and then we wake up.  However,
         * the call to xfs_log_move_tail() doesn't do anything if there's
         * not enough free space to wake people up so we're safe calling it.
         */
        min_lip = xfs_ail_min(ailp);

        if (min_lip == lip)
                xfs_log_move_tail(ailp->xa_mount, 1);
}       /* xfs_trans_unlocked_item */
/*
 * xfs_trans_ail_update_bulk - bulk AIL insertion operation.
 *
 * @xfs_trans_ail_update_bulk takes an array of log items that all need to be
 * positioned at the same LSN in the AIL. If an item is not in the AIL, it
 * will be added.  Otherwise, it will be repositioned by removing it and
 * re-adding it to the AIL. If we move the first item in the AIL, update the
 * log tail to match the new minimum LSN in the AIL.
 *
 * This function takes the AIL lock once to execute the update operations on
 * all the items in the array, rather than taking and dropping the lock around
 * each item. Item LSNs may change while waiting for the lock, so once we have
 * it we need to check each log item LSN to confirm it still needs to be moved
 * forward in the AIL.
 *
 * To optimise the insert operation, we delete all the items from the AIL in
 * the first pass, moving them into a temporary list, then splice the
 * temporary list into the correct position in the AIL. This avoids needing
 * to do an insert operation on every item.
 *
 * This function must be called with the AIL lock held.  The lock is dropped
 * before returning.
 */
void
xfs_trans_ail_update_bulk(
        struct xfs_ail          *ailp,
        struct xfs_ail_cursor   *cur,
        struct xfs_log_item     **log_items,
        int                     nr_items,
        xfs_lsn_t               lsn) __releases(ailp->xa_lock)
{
        xfs_log_item_t          *mlip;
        xfs_lsn_t               tail_lsn;
        int                     mlip_changed = 0;
        int                     i;
        LIST_HEAD(tmp);

        mlip = xfs_ail_min(ailp);

        for (i = 0; i < nr_items; i++) {
                struct xfs_log_item *lip = log_items[i];
                if (lip->li_flags & XFS_LI_IN_AIL) {
                        /* check if we really need to move the item */
                        if (XFS_LSN_CMP(lsn, lip->li_lsn) <= 0)
                                continue;

                        xfs_ail_delete(ailp, lip);
                        if (mlip == lip)
                                mlip_changed = 1;
                } else {
                        lip->li_flags |= XFS_LI_IN_AIL;
                }
                lip->li_lsn = lsn;
                list_add(&lip->li_ail, &tmp);
        }

        xfs_ail_splice(ailp, cur, &tmp, lsn);

        if (!mlip_changed) {
                spin_unlock(&ailp->xa_lock);
                return;
        }

        /*
         * It is not safe to access mlip after the AIL lock is dropped, so we
         * must get a copy of li_lsn before we do so.  This is especially
         * important on 32-bit platforms where accessing and updating 64-bit
         * values like li_lsn is not atomic.
         */
        mlip = xfs_ail_min(ailp);
        tail_lsn = mlip->li_lsn;
        spin_unlock(&ailp->xa_lock);
        xfs_log_move_tail(ailp->xa_mount, tail_lsn);
}
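
/*
 * Editorial sketch (hypothetical caller, not from this file): a caller such
 * as transaction commit completion repositions all of a transaction's items
 * at the commit LSN in one call, taking the AIL lock itself beforehand:
 *
 *	spin_lock(&ailp->xa_lock);
 *	xfs_trans_ail_update_bulk(ailp, NULL, log_items, nr_items, commit_lsn);
 *	// the AIL lock has been dropped for us on return
 */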
/*
 * xfs_trans_ail_delete_bulk - remove multiple log items from the AIL
 *
 * @xfs_trans_ail_delete_bulk takes an array of log items that all need to be
 * removed from the AIL. The caller is already holding the AIL lock, and has
 * done all the checks necessary to ensure the items passed in via @log_items
 * are ready for deletion. This includes checking that the items are in the
 * AIL.
 *
 * For each log item to be removed, unlink it from the AIL, clear the IN_AIL
 * flag from the item and reset the item's lsn to 0. If we remove the first
 * item in the AIL, update the log tail to match the new minimum LSN in the
 * AIL.
 *
 * This function will not drop the AIL lock until all items are removed from
 * the AIL to minimise the amount of lock traffic on the AIL. This does not
 * greatly increase the AIL hold time, but does significantly reduce the
 * amount of traffic on the lock, especially during IO completion.
 *
 * This function must be called with the AIL lock held.  The lock is dropped
 * before returning.
 */
void
xfs_trans_ail_delete_bulk(
        struct xfs_ail          *ailp,
        struct xfs_log_item     **log_items,
        int                     nr_items) __releases(ailp->xa_lock)
{
        xfs_log_item_t          *mlip;
        xfs_lsn_t               tail_lsn;
        int                     mlip_changed = 0;
        int                     i;

        mlip = xfs_ail_min(ailp);

        for (i = 0; i < nr_items; i++) {
                struct xfs_log_item *lip = log_items[i];
                if (!(lip->li_flags & XFS_LI_IN_AIL)) {
                        struct xfs_mount        *mp = ailp->xa_mount;

                        spin_unlock(&ailp->xa_lock);
                        if (!XFS_FORCED_SHUTDOWN(mp)) {
                                xfs_alert_tag(mp, XFS_PTAG_AILDELETE,
                "%s: attempting to delete a log item that is not in the AIL",
                                                __func__);
                                xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
                        }
                        return;
                }

                xfs_ail_delete(ailp, lip);
                lip->li_flags &= ~XFS_LI_IN_AIL;
                lip->li_lsn = 0;
                if (mlip == lip)
                        mlip_changed = 1;
        }

        if (!mlip_changed) {
                spin_unlock(&ailp->xa_lock);
                return;
        }

        /*
         * It is not safe to access mlip after the AIL lock is dropped, so we
         * must get a copy of li_lsn before we do so.  This is especially
         * important on 32-bit platforms where accessing and updating 64-bit
         * values like li_lsn is not atomic.  It is possible we've emptied the
         * AIL here, so if that is the case, pass an LSN of 0 to the tail move.
         */
        mlip = xfs_ail_min(ailp);
        tail_lsn = mlip ? mlip->li_lsn : 0;
        spin_unlock(&ailp->xa_lock);
        xfs_log_move_tail(ailp->xa_mount, tail_lsn);
}
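
/*
 * Editorial sketch (hypothetical caller, not from this file): I/O completion
 * paths typically remove a batch of items in one call while already holding
 * the AIL lock:
 *
 *	spin_lock(&ailp->xa_lock);
 *	xfs_trans_ail_delete_bulk(ailp, log_items, nr_items);
 *	// lock dropped on return; tail moved if the minimum LSN changed
 */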
/*
 * The active item list (AIL) is a doubly linked list of log
 * items sorted by ascending lsn.  The base of the list is
 * a forw/back pointer pair embedded in the struct xfs_ail.
 * The base is initialized with both pointers pointing to the
 * base.  This case always needs to be distinguished, because
 * the base has no lsn to look at.  We almost always insert
 * at the end of the list, so on inserts we search from the
 * end of the list to find where the new item belongs.
 */

/*
 * Initialize the AIL and start the xfsaild thread.
 */
int
xfs_trans_ail_init(
        xfs_mount_t     *mp)
{
        struct xfs_ail  *ailp;

        ailp = kmem_zalloc(sizeof(struct xfs_ail), KM_MAYFAIL);
        if (!ailp)
                return ENOMEM;

        ailp->xa_mount = mp;
        INIT_LIST_HEAD(&ailp->xa_ail);
        spin_lock_init(&ailp->xa_lock);

        ailp->xa_task = kthread_run(xfsaild, ailp, "xfsaild/%s",
                        ailp->xa_mount->m_fsname);
        if (IS_ERR(ailp->xa_task))
                goto out_free_ailp;

        mp->m_ail = ailp;
        return 0;

out_free_ailp:
        kmem_free(ailp);
        return ENOMEM;
}

void
xfs_trans_ail_destroy(
        xfs_mount_t     *mp)
{
        struct xfs_ail  *ailp = mp->m_ail;

        kthread_stop(ailp->xa_task);
        kmem_free(ailp);
}