/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
#include "xfs_dinode.h"
#include "xfs_error.h"
#include "xfs_filestream.h"
#include "xfs_vnodeops.h"
#include "xfs_inode_item.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_fsops.h"

#include <linux/kthread.h>
#include <linux/freezer.h>

STATIC struct xfs_inode *
xfs_inode_ag_lookup(
        struct xfs_mount        *mp,
        struct xfs_perag        *pag,
        uint32_t                *first_index,
        int                     tag)
{
        int                     nr_found;
        struct xfs_inode        *ip;

        /*
         * use a gang lookup to find the next inode in the tree
         * as the tree is sparse and a gang lookup walks to find
         * the number of objects requested.
         */
        if (tag == XFS_ICI_NO_TAG) {
                nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
                                (void **)&ip, *first_index, 1);
        } else {
                nr_found = radix_tree_gang_lookup_tag(&pag->pag_ici_root,
                                (void **)&ip, *first_index, 1, tag);
        }
        if (!nr_found)
                return NULL;

        /*
         * Update the index for the next lookup. Catch overflows
         * into the next AG range which can occur if we have inodes
         * in the last block of the AG and we are currently
         * pointing to the last inode.
         */
        *first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
        if (*first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
                return NULL;
        return ip;
}

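/*
 * Editor's note, not part of the original file: a simplified analogue of
 * the overflow check above. XFS_INO_TO_AGINO() yields the AG-relative part
 * of an inode number; when ip->i_ino is the last inode number in the AG,
 * adding 1 carries into the AG-number bits and the AG-relative part wraps
 * to a smaller value, which is how the walk detects that it must stop
 * rather than scan into the next AG's number space.
 */
#if 0   /* illustrative sketch only */
        uint32_t        agino = 0xffffffff;     /* hypothetical last agino */
        uint32_t        next_agino = agino + 1; /* wraps to 0 */

        ASSERT(next_agino < agino);             /* wrap detected: end walk */
#endif
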
STATIC int
xfs_inode_ag_walk(
        struct xfs_mount        *mp,
        struct xfs_perag        *pag,
        int                     (*execute)(struct xfs_inode *ip,
                                           struct xfs_perag *pag, int flags),
        int                     flags,
        int                     tag,
        int                     exclusive,
        int                     *nr_to_scan)
{
        uint32_t                first_index;
        int                     last_error = 0;
        int                     skipped;

restart:
        skipped = 0;
        first_index = 0;
        do {
                int             error = 0;
                struct xfs_inode *ip;

                if (exclusive)
                        write_lock(&pag->pag_ici_lock);
                else
                        read_lock(&pag->pag_ici_lock);
                ip = xfs_inode_ag_lookup(mp, pag, &first_index, tag);
                if (!ip) {
                        if (exclusive)
                                write_unlock(&pag->pag_ici_lock);
                        else
                                read_unlock(&pag->pag_ici_lock);
                        break;
                }

                /* execute releases pag->pag_ici_lock */
                error = execute(ip, pag, flags);
                if (error == EAGAIN) {
                        skipped++;
                        continue;
                }
                if (error)
                        last_error = error;

                /* bail out if the filesystem is corrupted. */
                if (error == EFSCORRUPTED)
                        break;

        } while ((*nr_to_scan)--);

        if (skipped) {
                delay(1);
                goto restart;
        }
        return last_error;
}

/*
 * Select the next per-ag structure to iterate during the walk. The reclaim
 * walk is optimised only to walk AGs with reclaimable inodes in them.
 */
static struct xfs_perag *
xfs_inode_ag_iter_next_pag(
        struct xfs_mount        *mp,
        xfs_agnumber_t          *first,
        int                     tag)
{
        struct xfs_perag        *pag = NULL;

        if (tag == XFS_ICI_RECLAIM_TAG) {
                int found;
                int ref;

                spin_lock(&mp->m_perag_lock);
                found = radix_tree_gang_lookup_tag(&mp->m_perag_tree,
                                (void **)&pag, *first, 1, tag);
                if (found <= 0) {
                        spin_unlock(&mp->m_perag_lock);
                        return NULL;
                }
                *first = pag->pag_agno + 1;
                /* open coded pag reference increment */
                ref = atomic_inc_return(&pag->pag_ref);
                spin_unlock(&mp->m_perag_lock);
                trace_xfs_perag_get_reclaim(mp, pag->pag_agno, ref, _RET_IP_);
        } else {
                pag = xfs_perag_get(mp, *first);
                (*first)++;
        }
        return pag;
}

int
xfs_inode_ag_iterator(
        struct xfs_mount        *mp,
        int                     (*execute)(struct xfs_inode *ip,
                                           struct xfs_perag *pag, int flags),
        int                     flags,
        int                     tag,
        int                     exclusive,
        int                     *nr_to_scan)
{
        struct xfs_perag        *pag;
        int                     error = 0;
        int                     last_error = 0;
        xfs_agnumber_t          ag;
        int                     nr;

        nr = nr_to_scan ? *nr_to_scan : INT_MAX;
        ag = 0;
        while ((pag = xfs_inode_ag_iter_next_pag(mp, &ag, tag))) {
                error = xfs_inode_ag_walk(mp, pag, execute, flags, tag,
                                                exclusive, &nr);
                xfs_perag_put(pag);
                if (error) {
                        last_error = error;
                        if (error == EFSCORRUPTED)
                                break;
                }
                if (nr <= 0)
                        break;
        }
        if (nr_to_scan)
                *nr_to_scan = nr;
        return XFS_ERROR(last_error);
}

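/*
 * Editor's note, not part of the original file: a minimal sketch of an
 * xfs_inode_ag_iterator() callback. "xfs_example_visit" is a hypothetical
 * name. Callbacks are invoked with pag->pag_ici_lock held and must drop
 * it; xfs_sync_inode_valid() (below) does that, and on success also takes
 * a VFS reference that the callback must drop with IRELE().
 */
#if 0   /* illustrative sketch only */
STATIC int
xfs_example_visit(
        struct xfs_inode        *ip,
        struct xfs_perag        *pag,
        int                     flags)
{
        int                     error;

        error = xfs_sync_inode_valid(ip, pag); /* releases pag_ici_lock */
        if (error)
                return error;
        /* ... operate on the inode here ... */
        IRELE(ip);
        return 0;
}

        /* walk every cached inode, non-exclusively, with no scan limit: */
        error = xfs_inode_ag_iterator(mp, xfs_example_visit, 0,
                                      XFS_ICI_NO_TAG, 0, NULL);
#endif
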
/* must be called with pag_ici_lock held and releases it */
STATIC int
xfs_sync_inode_valid(
        struct xfs_inode        *ip,
        struct xfs_perag        *pag)
{
        struct inode            *inode = VFS_I(ip);
        int                     error = EFSCORRUPTED;

        /* nothing to sync during shutdown */
        if (XFS_FORCED_SHUTDOWN(ip->i_mount))
                goto out_unlock;

        /* avoid new or reclaimable inodes. Leave for reclaim code to flush */
        error = ENOENT;
        if (xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM))
                goto out_unlock;

        /* If we can't grab the inode, it must be on its way to reclaim. */
        if (!igrab(inode))
                goto out_unlock;

        if (is_bad_inode(inode)) {
                IRELE(ip);
                goto out_unlock;
        }

        /* inode is valid */
        error = 0;
out_unlock:
        read_unlock(&pag->pag_ici_lock);
        return error;
}

STATIC int
xfs_sync_inode_data(
        struct xfs_inode        *ip,
        struct xfs_perag        *pag,
        int                     flags)
{
        struct inode            *inode = VFS_I(ip);
        struct address_space    *mapping = inode->i_mapping;
        int                     error = 0;

        error = xfs_sync_inode_valid(ip, pag);
        if (error)
                return error;

        if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
                goto out_wait;

        if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED)) {
                if (flags & SYNC_TRYLOCK)
                        goto out_wait;
                xfs_ilock(ip, XFS_IOLOCK_SHARED);
        }

        error = xfs_flush_pages(ip, 0, -1, (flags & SYNC_WAIT) ?
                                0 : XBF_ASYNC, FI_NONE);
        xfs_iunlock(ip, XFS_IOLOCK_SHARED);

out_wait:
        if (flags & SYNC_WAIT)
                xfs_ioend_wait(ip);
        IRELE(ip);
        return error;
}

STATIC int
xfs_sync_inode_attr(
        struct xfs_inode        *ip,
        struct xfs_perag        *pag,
        int                     flags)
{
        int                     error = 0;

        error = xfs_sync_inode_valid(ip, pag);
        if (error)
                return error;

        xfs_ilock(ip, XFS_ILOCK_SHARED);
        if (xfs_inode_clean(ip))
                goto out_unlock;
        if (!xfs_iflock_nowait(ip)) {
                if (!(flags & SYNC_WAIT))
                        goto out_unlock;
                xfs_iflock(ip);
        }

        if (xfs_inode_clean(ip)) {
                xfs_ifunlock(ip);
                goto out_unlock;
        }

        error = xfs_iflush(ip, flags);

out_unlock:
        xfs_iunlock(ip, XFS_ILOCK_SHARED);
        IRELE(ip);
        return error;
}

/*
 * Write out pagecache data for the whole filesystem.
 */
int
xfs_sync_data(
        struct xfs_mount        *mp,
        int                     flags)
{
        int                     error;

        ASSERT((flags & ~(SYNC_TRYLOCK|SYNC_WAIT)) == 0);

        error = xfs_inode_ag_iterator(mp, xfs_sync_inode_data, flags,
                                      XFS_ICI_NO_TAG, 0, NULL);
        if (error)
                return XFS_ERROR(error);

        xfs_log_force(mp, (flags & SYNC_WAIT) ? XFS_LOG_SYNC : 0);
        return 0;
}

/*
 * Write out inode metadata (attributes) for the whole filesystem.
 */
int
xfs_sync_attr(
        struct xfs_mount        *mp,
        int                     flags)
{
        ASSERT((flags & ~SYNC_WAIT) == 0);

        return xfs_inode_ag_iterator(mp, xfs_sync_inode_attr, flags,
                                     XFS_ICI_NO_TAG, 0, NULL);
}

STATIC int
xfs_sync_fsdata(
        struct xfs_mount        *mp)
{
        struct xfs_buf          *bp;

        /*
         * If the buffer is pinned then push on the log so we won't get stuck
         * waiting in the write for someone, maybe ourselves, to flush the log.
         *
         * Even though we just pushed the log above, we did not have the
         * superblock buffer locked at that point so it can become pinned in
         * between there and here.
         */
        bp = xfs_getsb(mp, 0);
        if (XFS_BUF_ISPINNED(bp))
                xfs_log_force(mp, 0);

        return xfs_bwrite(mp, bp);
}

/*
 * When remounting a filesystem read-only or freezing the filesystem, we have
 * two phases to execute. The first phase is syncing the data before we
 * quiesce the filesystem, and the second is flushing all the inodes out after
 * we've waited for all the transactions created by the first phase to
 * complete. The second phase ensures that the inodes are written to their
 * location on disk rather than just existing in transactions in the log. This
 * means after a quiesce there is no log replay required to write the inodes
 * to disk (this is the main difference between a sync and a quiesce).
 */

/*
 * First stage of freeze - no writers will make progress now we are here,
 * so we flush delwri and delalloc buffers here, then wait for all I/O to
 * complete. Data is frozen at that point. Metadata is not frozen,
 * transactions can still occur here so don't bother flushing the buftarg
 * because it'll just get dirty again.
 */
int
xfs_quiesce_data(
        struct xfs_mount        *mp)
{
        int                     error, error2 = 0;

        /* push non-blocking */
        xfs_sync_data(mp, 0);
        xfs_qm_sync(mp, SYNC_TRYLOCK);

        /* push and block till complete */
        xfs_sync_data(mp, SYNC_WAIT);
        xfs_qm_sync(mp, SYNC_WAIT);

        /* write superblock and hoover up shutdown errors */
        error = xfs_sync_fsdata(mp);

        /* make sure all delwri buffers are written out */
        xfs_flush_buftarg(mp->m_ddev_targp, 1);

        /* mark the log as covered if needed */
        if (xfs_log_need_covered(mp))
                error2 = xfs_fs_log_dummy(mp, SYNC_WAIT);

        /* flush data-only devices */
        if (mp->m_rtdev_targp)
                XFS_bflush(mp->m_rtdev_targp);

        return error ? error : error2;
}

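/*
 * Editor's note, not part of the original file: a sketch of how the two
 * quiesce phases are driven in sequence, modelled on the remount-read-only
 * path (the actual VFS callers live in xfs_super.c, not here). This is an
 * illustration of the ordering described above, not a verbatim caller.
 */
#if 0   /* illustrative sketch only */
        error = xfs_quiesce_data(mp);   /* phase 1: sync data, write sb */
        if (!error)
                xfs_quiesce_attr(mp);   /* phase 2: drain transactions,
                                         * flush inodes, write unmount
                                         * record */
#endif
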
STATIC void
xfs_quiesce_fs(
        struct xfs_mount        *mp)
{
        int     count = 0, pincount;

        xfs_reclaim_inodes(mp, 0);
        xfs_flush_buftarg(mp->m_ddev_targp, 0);

        /*
         * This loop must run at least twice. The first instance of the loop
         * will flush most meta data but that will generate more meta data
         * (typically directory updates), which then must be flushed and
         * logged before we can write the unmount record. We also do sync
         * reclaim of inodes to catch any that the above delwri flush skipped.
         */
        do {
                xfs_reclaim_inodes(mp, SYNC_WAIT);
                xfs_sync_attr(mp, SYNC_WAIT);
                pincount = xfs_flush_buftarg(mp->m_ddev_targp, 1);
                if (!pincount) {
                        delay(50);
                        count++;
                }
        } while (count < 2 && pincount);
}

/*
 * Second stage of a quiesce. The data is already synced, now we have to take
 * care of the metadata. New transactions are already blocked, so we need to
 * wait for any remaining transactions to drain out before proceeding.
 */
void
xfs_quiesce_attr(
        struct xfs_mount        *mp)
{
        int     error = 0;

        /* wait for all modifications to complete */
        while (atomic_read(&mp->m_active_trans) > 0)
                delay(100);

        /* flush inodes and push all remaining buffers out to disk */
        xfs_quiesce_fs(mp);

        /*
         * Just warn here till VFS can correctly support
         * read-only remount without racing.
         */
        WARN_ON(atomic_read(&mp->m_active_trans) != 0);

        /* Push the superblock and write an unmount record */
        error = xfs_log_sbcount(mp, 1);
        if (error)
                xfs_fs_cmn_err(CE_WARN, mp,
                                "xfs_attr_quiesce: failed to log sb changes. "
                                "Frozen image may not be consistent.");
        xfs_log_unmount_write(mp);
        xfs_unmountfs_writesb(mp);
}

/*
 * Enqueue a work item to be picked up by the vfs xfssyncd thread.
 * Doing this has two advantages:
 * - It saves on stack space, which is tight in certain situations
 * - It can be used (with care) as a mechanism to avoid deadlocks.
 * Flushing while allocating in a full filesystem requires both.
 */
STATIC void
xfs_syncd_queue_work(
        struct xfs_mount        *mp,
        void                    *data,
        void                    (*syncer)(struct xfs_mount *, void *),
        struct completion       *completion)
{
        struct xfs_sync_work    *work;

        work = kmem_alloc(sizeof(struct xfs_sync_work), KM_SLEEP);
        INIT_LIST_HEAD(&work->w_list);
        work->w_syncer = syncer;
        work->w_data = data;
        work->w_mount = mp;
        work->w_completion = completion;
        spin_lock(&mp->m_sync_lock);
        list_add_tail(&work->w_list, &mp->m_sync_list);
        spin_unlock(&mp->m_sync_lock);
        wake_up_process(mp->m_sync_task);
}

/*
 * Flush delayed allocate data, attempting to free up reserved space
 * from existing allocations. At this point a new allocation attempt
 * has failed with ENOSPC and we are in the process of scratching our
 * heads, looking about for more room...
 */
STATIC void
xfs_flush_inodes_work(
        struct xfs_mount        *mp,
        void                    *arg)
{
        struct inode            *inode = arg;

        xfs_sync_data(mp, SYNC_TRYLOCK);
        xfs_sync_data(mp, SYNC_TRYLOCK | SYNC_WAIT);
        iput(inode);
}

void
xfs_flush_inodes(
        xfs_inode_t     *ip)
{
        struct inode    *inode = VFS_I(ip);
        DECLARE_COMPLETION_ONSTACK(completion);

        igrab(inode);
        xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inodes_work,
                             &completion);
        wait_for_completion(&completion);
        xfs_log_force(ip->i_mount, XFS_LOG_SYNC);
}

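/*
 * Editor's note, not part of the original file: a sketch of the ENOSPC
 * retry pattern this supports. Delayed allocation callers (see
 * xfs_iomap.c) retry once after flushing; "xfs_example_alloc" and the
 * surrounding flow control are hypothetical simplifications.
 */
#if 0   /* illustrative sketch only */
retry:
        error = xfs_example_alloc(ip);
        if (error == ENOSPC && !flushed) {
                /* blocks until the queued flush completes, then log force */
                xfs_flush_inodes(ip);
                flushed = 1;
                goto retry;
        }
#endif
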
/*
 * Every sync period we need to unpin all items, reclaim inodes and sync
 * disk quotas. We might need to cover the log to indicate that the
 * filesystem is idle and not frozen.
 */
STATIC void
xfs_sync_worker(
        struct xfs_mount        *mp,
        void                    *unused)
{
        int                     error;

        if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
                xfs_log_force(mp, 0);
                xfs_reclaim_inodes(mp, 0);
                /* dgc: errors ignored here */
                error = xfs_qm_sync(mp, SYNC_TRYLOCK);
                if (mp->m_super->s_frozen == SB_UNFROZEN &&
                    xfs_log_need_covered(mp))
                        error = xfs_fs_log_dummy(mp, 0);
        }
        mp->m_sync_seq++;
        wake_up(&mp->m_wait_single_sync_task);
}

STATIC int
xfssyncd(
        void                    *arg)
{
        struct xfs_mount        *mp = arg;
        long                    timeleft;
        xfs_sync_work_t         *work, *n;
        LIST_HEAD               (tmp);

        set_freezable();
        timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10);
        for (;;) {
                if (list_empty(&mp->m_sync_list))
                        timeleft = schedule_timeout_interruptible(timeleft);
                /* swsusp */
                try_to_freeze();
                if (kthread_should_stop() && list_empty(&mp->m_sync_list))
                        break;

                spin_lock(&mp->m_sync_lock);
                /*
                 * We can get woken by laptop mode, to do a sync -
                 * that's the (only!) case where the list would be
                 * empty with time remaining.
                 */
                if (!timeleft || list_empty(&mp->m_sync_list)) {
                        if (!timeleft)
                                timeleft = xfs_syncd_centisecs *
                                                        msecs_to_jiffies(10);
                        INIT_LIST_HEAD(&mp->m_sync_work.w_list);
                        list_add_tail(&mp->m_sync_work.w_list,
                                        &mp->m_sync_list);
                }
                list_splice_init(&mp->m_sync_list, &tmp);
                spin_unlock(&mp->m_sync_lock);

                list_for_each_entry_safe(work, n, &tmp, w_list) {
                        (*work->w_syncer)(mp, work->w_data);
                        list_del(&work->w_list);
                        if (work == &mp->m_sync_work)
                                continue;
                        if (work->w_completion)
                                complete(work->w_completion);
                        kmem_free(work);
                }
        }

        return 0;
}

int
xfs_syncd_init(
        struct xfs_mount        *mp)
{
        mp->m_sync_work.w_syncer = xfs_sync_worker;
        mp->m_sync_work.w_mount = mp;
        mp->m_sync_work.w_completion = NULL;
        mp->m_sync_task = kthread_run(xfssyncd, mp, "xfssyncd/%s",
                                      mp->m_fsname);
        if (IS_ERR(mp->m_sync_task))
                return -PTR_ERR(mp->m_sync_task);
        return 0;
}

void
xfs_syncd_stop(
        struct xfs_mount        *mp)
{
        kthread_stop(mp->m_sync_task);
}

void
__xfs_inode_set_reclaim_tag(
        struct xfs_perag        *pag,
        struct xfs_inode        *ip)
{
        radix_tree_tag_set(&pag->pag_ici_root,
                           XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
                           XFS_ICI_RECLAIM_TAG);

        if (!pag->pag_ici_reclaimable) {
                /* propagate the reclaim tag up into the perag radix tree */
                spin_lock(&ip->i_mount->m_perag_lock);
                radix_tree_tag_set(&ip->i_mount->m_perag_tree,
                                XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
                                XFS_ICI_RECLAIM_TAG);
                spin_unlock(&ip->i_mount->m_perag_lock);
                trace_xfs_perag_set_reclaim(ip->i_mount, pag->pag_agno,
                                                        -1, _RET_IP_);
        }
        pag->pag_ici_reclaimable++;
}

/*
 * We set the inode flag atomically with the radix tree tag.
 * Once we get tag lookups on the radix tree, this inode flag
 * can go away.
 */
void
xfs_inode_set_reclaim_tag(
        xfs_inode_t     *ip)
{
        struct xfs_mount *mp = ip->i_mount;
        struct xfs_perag *pag;

        pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
        write_lock(&pag->pag_ici_lock);
        spin_lock(&ip->i_flags_lock);
        __xfs_inode_set_reclaim_tag(pag, ip);
        __xfs_iflags_set(ip, XFS_IRECLAIMABLE);
        spin_unlock(&ip->i_flags_lock);
        write_unlock(&pag->pag_ici_lock);
        xfs_perag_put(pag);
}

STATIC void
__xfs_inode_clear_reclaim(
        struct xfs_perag        *pag,
        struct xfs_inode        *ip)
{
        pag->pag_ici_reclaimable--;
        if (!pag->pag_ici_reclaimable) {
                /* clear the reclaim tag from the perag radix tree */
                spin_lock(&ip->i_mount->m_perag_lock);
                radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
                                XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
                                XFS_ICI_RECLAIM_TAG);
                spin_unlock(&ip->i_mount->m_perag_lock);
                trace_xfs_perag_clear_reclaim(ip->i_mount, pag->pag_agno,
                                                        -1, _RET_IP_);
        }
}

void
__xfs_inode_clear_reclaim_tag(
        struct xfs_mount        *mp,
        struct xfs_perag        *pag,
        struct xfs_inode        *ip)
{
        radix_tree_tag_clear(&pag->pag_ici_root,
                        XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
        __xfs_inode_clear_reclaim(pag, ip);
}

/*
 * Inodes in different states need to be treated differently, and the return
 * value of xfs_iflush is not sufficient to get this right. The following
 * table lists the inode states and the reclaim actions necessary for
 * non-blocking reclaim:
 *
 *      inode state             iflush ret      required action
 *      ---------------         ----------      ---------------
 *      bad                     -               reclaim
 *      shutdown                EIO             unpin and reclaim
 *      clean, unpinned         0               reclaim
 *      stale, unpinned         0               reclaim
 *      clean, pinned(*)        0               requeue
 *      stale, pinned           EAGAIN          requeue
 *      dirty, delwri ok        0               requeue
 *      dirty, delwri blocked   EAGAIN          requeue
 *      dirty, sync flush       0               reclaim
 *
 * (*) dgc: I don't think the clean, pinned state is possible but it gets
 * handled anyway given the order of checks implemented.
 *
 * As can be seen from the table, the return value of xfs_iflush() is not
 * sufficient to correctly decide the reclaim action here. The checks in
 * xfs_iflush() might look like duplicates, but they are not.
 *
 * Also, because we get the flush lock first, we know that any inode that has
 * been flushed delwri has had the flush completed by the time we check that
 * the inode is clean. The clean inode check needs to be done before flushing
 * the inode delwri, otherwise we would loop forever requeuing clean inodes
 * as we cannot tell apart a successful delwri flush and a clean inode from
 * the return value of xfs_iflush().
 *
 * Note that because the inode is flushed delayed write by background
 * writeback, the flush lock may already be held here and waiting on it can
 * result in very long latencies. Hence for sync reclaims, where we wait on
 * the flush lock, the caller should push out delayed write inodes first
 * before trying to reclaim them to minimise the amount of time spent
 * waiting. For background reclaim, we just requeue the inode for the next
 * pass.
 *
 * Hence the order of actions after gaining the locks should be:
 *      bad             => reclaim
 *      shutdown        => unpin and reclaim
 *      pinned, delwri  => requeue
 *      pinned, sync    => unpin
 *      stale           => reclaim
 *      clean           => reclaim
 *      dirty, delwri   => flush and requeue
 *      dirty, sync     => flush, wait and reclaim
 */
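/*
 * Editor's note, not part of the original file: one row of the table above
 * traced through the ladder. In a non-blocking (background) pass, a dirty,
 * unpinned inode takes the "dirty, delwri => flush and requeue" leg:
 * xfs_iflush() queues a delayed write and returns 0, the inode is left
 * requeued, and a later pass finds it clean and reclaims it instead of
 * waiting on the I/O here.
 */
#if 0   /* illustrative sketch only */
        error = xfs_iflush(ip, 0);      /* delwri flush queued, returns 0 */
        /* => requeue; a later pass sees xfs_inode_clean(ip) and reclaims */
#endif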
STATIC int
xfs_reclaim_inode(
        struct xfs_inode        *ip,
        struct xfs_perag        *pag,
        int                     sync_mode)
{
        int     error = 0;

        /*
         * The radix tree lock here protects a thread in xfs_iget from racing
         * with us starting reclaim on the inode. Once we have the
         * XFS_IRECLAIM flag set it will not touch us.
         */
        spin_lock(&ip->i_flags_lock);
        ASSERT_ALWAYS(__xfs_iflags_test(ip, XFS_IRECLAIMABLE));
        if (__xfs_iflags_test(ip, XFS_IRECLAIM)) {
                /* ignore as it is already under reclaim */
                spin_unlock(&ip->i_flags_lock);
                write_unlock(&pag->pag_ici_lock);
                return 0;
        }
        __xfs_iflags_set(ip, XFS_IRECLAIM);
        spin_unlock(&ip->i_flags_lock);
        write_unlock(&pag->pag_ici_lock);

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        if (!xfs_iflock_nowait(ip)) {
                if (!(sync_mode & SYNC_WAIT))
                        goto out;
                xfs_iflock(ip);
        }

        if (is_bad_inode(VFS_I(ip)))
                goto reclaim;
        if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
                xfs_iunpin_wait(ip);
                goto reclaim;
        }
        if (xfs_ipincount(ip)) {
                if (!(sync_mode & SYNC_WAIT)) {
                        xfs_ifunlock(ip);
                        goto out;
                }
                xfs_iunpin_wait(ip);
        }
        if (xfs_iflags_test(ip, XFS_ISTALE))
                goto reclaim;
        if (xfs_inode_clean(ip))
                goto reclaim;

        /* Now we have an inode that needs flushing */
        error = xfs_iflush(ip, sync_mode);
        if (sync_mode & SYNC_WAIT) {
                xfs_iflock(ip);
                goto reclaim;
        }

        /*
         * When we have to flush an inode but don't have SYNC_WAIT set, we
         * flush the inode out using a delwri buffer and wait for the next
         * call into reclaim to find it in a clean state instead of waiting
         * for it now. We also don't return errors here - if the error is
         * transient then the next reclaim pass will flush the inode, and if
         * the error is permanent then the next sync reclaim will reclaim the
         * inode and pass on the error.
         */
        if (error && error != EAGAIN && !XFS_FORCED_SHUTDOWN(ip->i_mount)) {
                xfs_fs_cmn_err(CE_WARN, ip->i_mount,
                        "inode 0x%llx background reclaim flush failed with %d",
                        (long long)ip->i_ino, error);
        }
out:
        xfs_iflags_clear(ip, XFS_IRECLAIM);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        /*
         * We could return EAGAIN here to make reclaim rescan the inode tree
         * in a short while. However, this just burns CPU time scanning the
         * tree waiting for IO to complete and xfssyncd never goes back to
         * the idle state. Instead, return 0 to let the next scheduled
         * background reclaim attempt to reclaim the inode again.
         */
        return 0;

reclaim:
        xfs_ifunlock(ip);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);

        XFS_STATS_INC(xs_ig_reclaims);
        /*
         * Remove the inode from the per-AG radix tree.
         *
         * Because radix_tree_delete won't complain even if the item was never
         * added to the tree, assert that it's been there before to catch
         * problems with the inode life time early on.
         */
        write_lock(&pag->pag_ici_lock);
        if (!radix_tree_delete(&pag->pag_ici_root,
                                XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino)))
                ASSERT(0);
        __xfs_inode_clear_reclaim(pag, ip);
        write_unlock(&pag->pag_ici_lock);

        /*
         * Here we do an (almost) spurious inode lock in order to coordinate
         * with inode cache radix tree lookups. This is because the lookup
         * can reference the inodes in the cache without taking references.
         *
         * We make that OK here by ensuring that we wait until the inode is
         * unlocked after the lookup before we go ahead and free it. We get
         * both the ilock and the iolock because the code may need to drop
         * the ilock one but will still hold the iolock.
         */
        xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
        xfs_qm_dqdetach(ip);
        xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);

        xfs_inode_free(ip);
        return error;
}

int
xfs_reclaim_inodes(
        struct xfs_mount        *mp,
        int                     mode)
{
        return xfs_inode_ag_iterator(mp, xfs_reclaim_inode, mode,
                                        XFS_ICI_RECLAIM_TAG, 1, NULL);
}

/*
 * Shrinker infrastructure.
 */
static int
xfs_reclaim_inode_shrink(
        struct shrinker *shrink,
        int             nr_to_scan,
        gfp_t           gfp_mask)
{
        struct xfs_mount *mp;
        struct xfs_perag *pag;
        xfs_agnumber_t  ag;
        int             reclaimable;

        mp = container_of(shrink, struct xfs_mount, m_inode_shrink);
        if (nr_to_scan) {
                if (!(gfp_mask & __GFP_FS))
                        return -1;

                xfs_inode_ag_iterator(mp, xfs_reclaim_inode, 0,
                                        XFS_ICI_RECLAIM_TAG, 1, &nr_to_scan);
                /* if we don't exhaust the scan, don't bother coming back */
                if (nr_to_scan > 0)
                        return -1;
        }

        reclaimable = 0;
        ag = 0;
        while ((pag = xfs_inode_ag_iter_next_pag(mp, &ag,
                                        XFS_ICI_RECLAIM_TAG))) {
                reclaimable += pag->pag_ici_reclaimable;
                xfs_perag_put(pag);
        }
        return reclaimable;
}

void
xfs_inode_shrinker_register(
        struct xfs_mount        *mp)
{
        mp->m_inode_shrink.shrink = xfs_reclaim_inode_shrink;
        mp->m_inode_shrink.seeks = DEFAULT_SEEKS;
        register_shrinker(&mp->m_inode_shrink);
}

void
xfs_inode_shrinker_unregister(
        struct xfs_mount        *mp)
{
        unregister_shrinker(&mp->m_inode_shrink);
}