/*
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes.  ie: data writeback.  Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002	akpm@zip.com.au
 *		Split out of fs/inode.c
 *		Additions for address_space-based writeback
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
/**
 * __mark_inode_dirty - internal function
 * @inode: inode to mark
 * @flags: what kind of dirty (i.e. I_DIRTY_SYNC)
 *
 * Mark an inode as dirty.  Callers should use mark_inode_dirty() or
 * mark_inode_dirty_sync().
 *
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
 *
 * This function *must* be atomic for the I_DIRTY_PAGES case -
 * set_page_dirty() is called under spinlock in several places.
 *
 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
 * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
 * the kernel-internal blockdev inode represents the dirtying time of the
 * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
 * page->mapping->host, so the page-dirtying time is recorded in the internal
 * blockdev inode.
 */
void __mark_inode_dirty(struct inode *inode, int flags)
{
	struct super_block *sb = inode->i_sb;

	/*
	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
	 * dirty the inode itself
	 */
	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		if (sb->s_op->dirty_inode)
			sb->s_op->dirty_inode(inode);
	}

	/*
	 * make sure that changes are seen by all cpus before we test i_state
	 */
	smp_mb();

	/* avoid the locking if we can */
	if ((inode->i_state & flags) == flags)
		return;

	if (unlikely(block_dump)) {
		struct dentry *dentry = NULL;
		const char *name = "?";

		if (!list_empty(&inode->i_dentry)) {
			dentry = list_entry(inode->i_dentry.next,
					    struct dentry, d_alias);
			if (dentry && dentry->d_name.name)
				name = (const char *) dentry->d_name.name;
		}

		if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev"))
			printk(KERN_DEBUG
			       "%s(%d): dirtied inode %lu (%s) on %s\n",
			       current->comm, task_pid_nr(current),
			       inode->i_ino, name, inode->i_sb->s_id);
	}

	spin_lock(&inode_lock);
	if ((inode->i_state & flags) != flags) {
		const int was_dirty = inode->i_state & I_DIRTY;

		inode->i_state |= flags;

		/*
		 * If the inode is being synced, just update its dirty state.
		 * The unlocker will place the inode on the appropriate
		 * superblock list, based upon its state.
		 */
		if (inode->i_state & I_SYNC)
			goto out;

		/*
		 * Only add valid (hashed) inodes to the superblock's
		 * dirty list.  Add blockdev inodes as well.
		 */
		if (!S_ISBLK(inode->i_mode)) {
			if (hlist_unhashed(&inode->i_hash))
				goto out;
		}
		if (inode->i_state & (I_FREEING|I_CLEAR))
			goto out;

		/*
		 * If the inode was already on s_dirty/s_io/s_more_io, don't
		 * reposition it (that would break s_dirty time-ordering).
		 */
		if (!was_dirty) {
			inode->dirtied_when = jiffies;
			list_move(&inode->i_list, &sb->s_dirty);
		}
	}
out:
	spin_unlock(&inode_lock);
}

EXPORT_SYMBOL(__mark_inode_dirty);
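/*
 * For reference, a sketch of the expected entry points mentioned in the
 * kerneldoc above (these wrappers live in include/linux/fs.h, not in this
 * file):
 *
 *	static inline void mark_inode_dirty(struct inode *inode)
 *	{
 *		__mark_inode_dirty(inode, I_DIRTY);
 *	}
 *
 *	static inline void mark_inode_dirty_sync(struct inode *inode)
 *	{
 *		__mark_inode_dirty(inode, I_DIRTY_SYNC);
 *	}
 */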
static int write_inode(struct inode *inode, int sync)
{
	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
		return inode->i_sb->s_op->write_inode(inode, sync);
	return 0;
}
/*
 * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
 * furthest end of its superblock's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the s_dirty list.  If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	if (!list_empty(&sb->s_dirty)) {
		struct inode *tail_inode;

		tail_inode = list_entry(sb->s_dirty.next, struct inode, i_list);
		if (!time_after_eq(inode->dirtied_when,
				   tail_inode->dirtied_when))
			inode->dirtied_when = jiffies;
	}
	list_move(&inode->i_list, &sb->s_dirty);
}
/*
 * requeue inode for re-scanning after sb->s_io list is exhausted.
 */
static void requeue_io(struct inode *inode)
{
	list_move(&inode->i_list, &inode->i_sb->s_more_io);
}
static void inode_sync_complete(struct inode *inode)
{
	/*
	 * Prevent speculative execution through spin_unlock(&inode_lock);
	 */
	smp_mb();
	wake_up_bit(&inode->i_state, __I_SYNC);
}
/*
 * Move expired dirty inodes from @delaying_queue to @dispatch_queue.
 */
static void move_expired_inodes(struct list_head *delaying_queue,
				struct list_head *dispatch_queue,
				unsigned long *older_than_this)
{
	while (!list_empty(delaying_queue)) {
		struct inode *inode = list_entry(delaying_queue->prev,
						 struct inode, i_list);
		if (older_than_this &&
		    time_after(inode->dirtied_when, *older_than_this))
			break;
		list_move(&inode->i_list, dispatch_queue);
	}
}
/*
 * Queue all expired dirty inodes for io, eldest first.
 */
static void queue_io(struct super_block *sb,
		     unsigned long *older_than_this)
{
	list_splice_init(&sb->s_more_io, sb->s_io.prev);
	move_expired_inodes(&sb->s_dirty, &sb->s_io, older_than_this);
}
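/*
 * A rough sketch of how an inode moves between the three per-superblock
 * queues (this only summarises the helpers above; it adds no machinery):
 *
 *	s_dirty ---(expired, moved by queue_io)--------------> s_io
 *	s_io ------(blocked or out of slice, requeue_io)-----> s_more_io
 *	s_more_io -(spliced back by the next queue_io)-------> s_io
 *
 * redirty_tail() instead sends an inode back to s_dirty, refreshing its
 * ->dirtied_when only when needed to preserve the list's time-ordering.
 */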
int sb_has_dirty_inodes(struct super_block *sb)
{
	return !list_empty(&sb->s_dirty) ||
	       !list_empty(&sb->s_io) ||
	       !list_empty(&sb->s_more_io);
}
EXPORT_SYMBOL(sb_has_dirty_inodes);
/*
 * Write a single inode's dirty pages and inode data out to disk.
 * If `wait' is set, wait on the writeout.
 *
 * The whole writeout design is quite complex and fragile.  We want to avoid
 * starvation of particular inodes when others are being redirtied, prevent
 * livelocks, etc.
 *
 * Called under inode_lock.
 */
static int
__sync_single_inode(struct inode *inode, struct writeback_control *wbc)
{
	unsigned dirty;
	struct address_space *mapping = inode->i_mapping;
	int wait = wbc->sync_mode == WB_SYNC_ALL;
	int ret;

	BUG_ON(inode->i_state & I_SYNC);

	/* Set I_SYNC, reset I_DIRTY */
	dirty = inode->i_state & I_DIRTY;
	inode->i_state |= I_SYNC;
	inode->i_state &= ~I_DIRTY;

	spin_unlock(&inode_lock);

	ret = do_writepages(mapping, wbc);

	/* Don't write the inode if only I_DIRTY_PAGES was set */
	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		int err = write_inode(inode, wait);
		if (ret == 0)
			ret = err;
	}

	if (wait) {
		int err = filemap_fdatawait(mapping);
		if (ret == 0)
			ret = err;
	}

	spin_lock(&inode_lock);
	inode->i_state &= ~I_SYNC;
	if (!(inode->i_state & I_FREEING)) {
		if (!(inode->i_state & I_DIRTY) &&
		    mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
			/*
			 * We didn't write back all the pages.  nfs_writepages()
			 * sometimes bales out without doing anything.  Redirty
			 * the inode; Move it from s_io onto s_more_io/s_dirty.
			 */
			/*
			 * akpm: if the caller was the kupdate function we put
			 * this inode at the head of s_dirty so it gets first
			 * consideration.  Otherwise, move it to the tail, for
			 * the reasons described there.  I'm not really sure
			 * how much sense this makes.  Presumably I had a good
			 * reason for doing it this way, and I'd rather not
			 * muck with it at present.
			 */
			if (wbc->for_kupdate) {
				/*
				 * For the kupdate function we move the inode
				 * to s_more_io so it will get more writeout as
				 * soon as the queue becomes uncongested.
				 */
				inode->i_state |= I_DIRTY_PAGES;
				if (wbc->nr_to_write <= 0) {
					/*
					 * slice used up: queue for next turn
					 */
					requeue_io(inode);
				} else {
					/*
					 * somehow blocked: retry later
					 */
					redirty_tail(inode);
				}
			} else {
				/*
				 * Otherwise fully redirty the inode so that
				 * other inodes on this superblock will get some
				 * writeout.  Otherwise heavy writing to one
				 * file would indefinitely suspend writeout of
				 * all the other files.
				 */
				inode->i_state |= I_DIRTY_PAGES;
				redirty_tail(inode);
			}
		} else if (inode->i_state & I_DIRTY) {
			/*
			 * Someone redirtied the inode while we were writing
			 * back the pages.
			 */
			redirty_tail(inode);
		} else if (atomic_read(&inode->i_count)) {
			/*
			 * The inode is clean, inuse
			 */
			list_move(&inode->i_list, &inode_in_use);
		} else {
			/*
			 * The inode is clean, unused
			 */
			list_move(&inode->i_list, &inode_unused);
		}
	}
	inode_sync_complete(inode);
	return ret;
}
/*
 * Write out an inode's dirty pages.  Called under inode_lock.  Either the
 * caller has a ref on the inode (either via __iget or via syscall against
 * an fd) or the inode has I_WILL_FREE set (via generic_forget_inode).
 */
static int
__writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
	wait_queue_head_t *wqh;

	if (!atomic_read(&inode->i_count))
		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
	else
		WARN_ON(inode->i_state & I_WILL_FREE);

	if ((wbc->sync_mode != WB_SYNC_ALL) && (inode->i_state & I_SYNC)) {
		/*
		 * We're skipping this inode because it's locked, and we're not
		 * doing writeback-for-data-integrity.  Move it to s_more_io so
		 * that writeback can proceed with the other inodes on s_io.
		 * We'll have another go at writing back this inode when we
		 * have completed a full scan of s_io.
		 */
		requeue_io(inode);
		return 0;
	}

	/*
	 * It's a data-integrity sync.  We must wait.
	 */
	if (inode->i_state & I_SYNC) {
		DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);

		wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
		do {
			spin_unlock(&inode_lock);
			__wait_on_bit(wqh, &wq, inode_wait,
				      TASK_UNINTERRUPTIBLE);
			spin_lock(&inode_lock);
		} while (inode->i_state & I_SYNC);
	}
	return __sync_single_inode(inode, wbc);
}
/*
 * Write out a superblock's list of dirty inodes.  A wait will be performed
 * upon no inodes, all inodes or the final one, depending upon sync_mode.
 *
 * If older_than_this is non-NULL, then only write out inodes which
 * had their first dirtying at a time earlier than *older_than_this.
 *
 * If we're a pdflush thread, then implement pdflush collision avoidance
 * against the entire list.
 *
 * WB_SYNC_HOLD is a hack for sys_sync(): reattach the inode to sb->s_dirty so
 * that it can be located for waiting on in __writeback_single_inode().
 *
 * Called under inode_lock.
 *
 * If `bdi' is non-zero then we're being asked to writeback a specific queue.
 * This function assumes that the blockdev superblock's inodes are backed by
 * a variety of queues, so all inodes are searched.  For other superblocks,
 * assume that all inodes are backed by the same queue.
 *
 * FIXME: this linear search could get expensive with many filesystems.  But
 * how to fix?  We need to go from an address_space to all inodes which share
 * a queue with that address_space.  (Easy: have a global "dirty superblocks"
 * list.)
 *
 * The inodes to be written are parked on sb->s_io.  They are moved back onto
 * sb->s_dirty as they are selected for writing.  This way, none can be missed
 * on the writer throttling path, and we get decent balancing between many
 * throttled threads: we don't want them all piling up on inode_sync_wait.
 */
static void
sync_sb_inodes(struct super_block *sb, struct writeback_control *wbc)
{
	const unsigned long start = jiffies;	/* livelock avoidance */

	if (!wbc->for_kupdate || list_empty(&sb->s_io))
		queue_io(sb, wbc->older_than_this);

	while (!list_empty(&sb->s_io)) {
		struct inode *inode = list_entry(sb->s_io.prev,
						 struct inode, i_list);
		struct address_space *mapping = inode->i_mapping;
		struct backing_dev_info *bdi = mapping->backing_dev_info;
		long pages_skipped;

		if (!bdi_cap_writeback_dirty(bdi)) {
			redirty_tail(inode);
			if (sb_is_blkdev_sb(sb)) {
				/*
				 * Dirty memory-backed blockdev: the ramdisk
				 * driver does this.  Skip just this inode
				 */
				continue;
			}
			/*
			 * Dirty memory-backed inode against a filesystem other
			 * than the kernel-internal bdev filesystem.  Skip the
			 * entire superblock.
			 */
			break;
		}

		if (wbc->nonblocking && bdi_write_congested(bdi)) {
			wbc->encountered_congestion = 1;
			if (!sb_is_blkdev_sb(sb))
				break;		/* Skip a congested fs */
			requeue_io(inode);
			continue;		/* Skip a congested blockdev */
		}

		if (wbc->bdi && bdi != wbc->bdi) {
			if (!sb_is_blkdev_sb(sb))
				break;		/* fs has the wrong queue */
			requeue_io(inode);
			continue;		/* blockdev has wrong queue */
		}

		/* Was this inode dirtied after sync_sb_inodes was called? */
		if (time_after(inode->dirtied_when, start))
			break;

		/* Is another pdflush already flushing this queue? */
		if (current_is_pdflush() && !writeback_acquire(bdi))
			break;

		BUG_ON(inode->i_state & I_FREEING);
		__iget(inode);
		pages_skipped = wbc->pages_skipped;
		__writeback_single_inode(inode, wbc);
		if (wbc->sync_mode == WB_SYNC_HOLD) {
			inode->dirtied_when = jiffies;
			list_move(&inode->i_list, &sb->s_dirty);
		}
		if (current_is_pdflush())
			writeback_release(bdi);
		if (wbc->pages_skipped != pages_skipped) {
			/*
			 * writeback is not making progress due to locked
			 * buffers.  Skip this inode for now.
			 */
			redirty_tail(inode);
		}
		spin_unlock(&inode_lock);
		iput(inode);
		cond_resched();
		spin_lock(&inode_lock);
		if (wbc->nr_to_write <= 0) {
			wbc->more_io = 1;
			break;
		}
		if (!list_empty(&sb->s_more_io))
			wbc->more_io = 1;
	}
	return;		/* Leave any unwritten inodes on s_io */
}
/*
 * Start writeback of dirty pagecache data against all unlocked inodes.
 *
 * Note:
 * We don't need to grab a reference to the superblock here.  If it has a
 * non-empty ->s_dirty it hasn't been killed yet and kill_super() won't
 * proceed past sync_inodes_sb() until the ->s_dirty/s_io/s_more_io lists
 * are all empty.  Since __sync_single_inode() regains inode_lock before it
 * finally moves the inode from the superblock lists, we are OK.
 *
 * If `older_than_this' is non-zero then only flush inodes which have a
 * flushtime older than *older_than_this.
 *
 * If `bdi' is non-zero then we will scan the first inode against each
 * superblock until we find the matching ones.  One group will be the dirty
 * inodes against a filesystem.  Then when we hit the dummy blockdev
 * superblock, sync_sb_inodes will seek out the blockdev which matches
 * `bdi'.  Maybe not super-efficient but we're about to do a ton of I/O...
 */
void
writeback_inodes(struct writeback_control *wbc)
{
	struct super_block *sb;

	might_sleep();
	spin_lock(&sb_lock);
restart:
	list_for_each_entry_reverse(sb, &super_blocks, s_list) {
		if (sb_has_dirty_inodes(sb)) {
			/* we're making our own get_super here */
			sb->s_count++;
			spin_unlock(&sb_lock);
			/*
			 * If we can't get the readlock, there's no sense in
			 * waiting around, most of the time the FS is going to
			 * be unmounted by the time it is released.
			 */
			if (down_read_trylock(&sb->s_umount)) {
				if (sb->s_root) {
					spin_lock(&inode_lock);
					sync_sb_inodes(sb, wbc);
					spin_unlock(&inode_lock);
				}
				up_read(&sb->s_umount);
			}
			spin_lock(&sb_lock);
			if (__put_super_and_need_restart(sb))
				goto restart;
		}
		if (wbc->nr_to_write <= 0)
			break;
	}
	spin_unlock(&sb_lock);
}
/*
 * writeback and wait upon the filesystem's dirty inodes.  The caller will
 * do this in two passes - one to write, and one to wait.  WB_SYNC_HOLD is
 * used to park the written inodes on sb->s_dirty for the wait pass.
 *
 * A finite limit is set on the number of pages which will be written,
 * to prevent infinite livelock of sys_sync().
 *
 * We add in the number of potentially dirty inodes, because each inode write
 * can dirty pagecache in the underlying blockdev.
 */
void sync_inodes_sb(struct super_block *sb, int wait)
{
	struct writeback_control wbc = {
		.sync_mode	= wait ? WB_SYNC_ALL : WB_SYNC_HOLD,
		.range_start	= 0,
		.range_end	= LLONG_MAX,
	};
	unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
	unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);

	wbc.nr_to_write = nr_dirty + nr_unstable +
			(inodes_stat.nr_inodes - inodes_stat.nr_unused) +
			nr_dirty + nr_unstable;
	wbc.nr_to_write += wbc.nr_to_write / 2;		/* Bit more for luck */
	spin_lock(&inode_lock);
	sync_sb_inodes(sb, &wbc);
	spin_unlock(&inode_lock);
}
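/*
 * Worked example of the limit above, with made-up numbers: 1000 dirty
 * pages, 200 unstable NFS pages and 300 in-use inodes give
 * nr_to_write = 1000 + 200 + 300 + 1000 + 200 = 2700, and the "bit more
 * for luck" bump raises that to 2700 + 1350 = 4050 pages.
 */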
/*
 * Rather lame livelock avoidance.
 */
static void set_sb_syncing(int val)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
	list_for_each_entry_reverse(sb, &super_blocks, s_list)
		sb->s_syncing = val;
	spin_unlock(&sb_lock);
}
/**
 * sync_inodes - writes all inodes to disk
 * @wait: wait for completion
 *
 * sync_inodes() goes through each super block's dirty inode list, writes the
 * inodes out, waits on the writeout and puts the inodes back on the normal
 * superblock list.
 *
 * This is for sys_sync().  fsync_dev() uses the same algorithm.  The subtle
 * part of the sync functions is that the blockdev "superblock" is processed
 * last.  This is because the write_inode() function of a typical fs will
 * perform no I/O, but will mark buffers in the blockdev mapping as dirty.
 * What we want to do is to perform all that dirtying first, and then write
 * back all those inode blocks via the blockdev mapping in one sweep.  So the
 * additional (somewhat redundant) sync_blockdev() calls here are to make
 * sure that really happens.  Because if we call sync_inodes_sb(wait=1) with
 * outstanding dirty inodes, the writeback goes block-at-a-time within the
 * filesystem's write_inode().  This is extremely slow.
 */
static void __sync_inodes(int wait)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
restart:
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (sb->s_syncing)
			continue;
		sb->s_syncing = 1;
		sb->s_count++;
		spin_unlock(&sb_lock);
		down_read(&sb->s_umount);
		if (sb->s_root) {
			sync_inodes_sb(sb, wait);
			sync_blockdev(sb->s_bdev);
		}
		up_read(&sb->s_umount);
		spin_lock(&sb_lock);
		if (__put_super_and_need_restart(sb))
			goto restart;
	}
	spin_unlock(&sb_lock);
}
void sync_inodes(int wait)
{
	set_sb_syncing(0);
	__sync_inodes(0);

	/* A blocking sync writes everything once more, and then waits. */
	if (wait) {
		set_sb_syncing(0);
		__sync_inodes(1);
	}
}
/**
 * write_inode_now - write an inode to disk
 * @inode: inode to write to disk
 * @sync: whether the write should be synchronous or not
 *
 * This function commits an inode to disk immediately if it is dirty. This is
 * primarily needed by knfsd.
 *
 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
 */
int write_inode_now(struct inode *inode, int sync)
{
	int ret;
	struct writeback_control wbc = {
		.nr_to_write = LONG_MAX,
		.sync_mode = WB_SYNC_ALL,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	if (!mapping_cap_writeback_dirty(inode->i_mapping))
		wbc.nr_to_write = 0;

	might_sleep();
	spin_lock(&inode_lock);
	ret = __writeback_single_inode(inode, &wbc);
	spin_unlock(&inode_lock);
	if (sync)
		inode_sync_wait(inode);
	return ret;
}
EXPORT_SYMBOL(write_inode_now);
/**
 * sync_inode - write an inode and its pages to disk.
 * @inode: the inode to sync
 * @wbc: controls the writeback mode
 *
 * sync_inode() will write an inode and its pages to disk.  It will also
 * correctly update the inode on its superblock's dirty inode lists and will
 * update inode->i_state.
 *
 * The caller must have a ref on the inode.
 */
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
	int ret;

	spin_lock(&inode_lock);
	ret = __writeback_single_inode(inode, wbc);
	spin_unlock(&inode_lock);
	return ret;
}
EXPORT_SYMBOL(sync_inode);
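/*
 * Illustrative caller sketch (not part of this file): a filesystem that
 * holds a reference on the inode could push back up to 16 pages, without
 * waiting, like this:
 *
 *	struct writeback_control wbc = {
 *		.sync_mode	= WB_SYNC_NONE,
 *		.nr_to_write	= 16,
 *		.range_start	= 0,
 *		.range_end	= LLONG_MAX,
 *	};
 *	int err = sync_inode(inode, &wbc);
 */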
/**
 * generic_osync_inode - flush all dirty data for a given inode to disk
 * @inode: inode to write
 * @mapping: the address_space that should be flushed
 * @what: what to write and wait upon
 *
 * This can be called by file_write functions for files which have the
 * O_SYNC flag set, to flush dirty writes to disk.
 *
 * @what is a bitmask, specifying which part of the inode's data should be
 * written and waited upon.
 *
 * OSYNC_DATA:		i_mapping's dirty data
 * OSYNC_METADATA:	the buffers at i_mapping->private_list
 * OSYNC_INODE:		the inode itself
 */
int generic_osync_inode(struct inode *inode, struct address_space *mapping, int what)
{
	int err = 0;
	int need_write_inode_now = 0;
	int err2;

	if (what & OSYNC_DATA)
		err = filemap_fdatawrite(mapping);
	if (what & (OSYNC_METADATA|OSYNC_DATA)) {
		err2 = sync_mapping_buffers(mapping);
		if (!err)
			err = err2;
	}
	if (what & OSYNC_DATA) {
		err2 = filemap_fdatawait(mapping);
		if (!err)
			err = err2;
	}

	spin_lock(&inode_lock);
	if ((inode->i_state & I_DIRTY) &&
	    ((what & OSYNC_INODE) || (inode->i_state & I_DIRTY_DATASYNC)))
		need_write_inode_now = 1;
	spin_unlock(&inode_lock);

	if (need_write_inode_now) {
		err2 = write_inode_now(inode, 1);
		if (!err)
			err = err2;
	} else
		inode_sync_wait(inode);

	return err;
}
EXPORT_SYMBOL(generic_osync_inode);
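/*
 * Illustrative caller sketch (not part of this file): an O_SYNC-aware
 * write path would typically end with something of this shape:
 *
 *	if ((file->f_flags & O_SYNC) || IS_SYNC(inode))
 *		err = generic_osync_inode(inode, mapping,
 *					  OSYNC_METADATA|OSYNC_DATA);
 */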
/**
 * writeback_acquire: attempt to get exclusive writeback access to a device
 * @bdi: the device's backing_dev_info structure
 *
 * It is a waste of resources to have more than one pdflush thread blocked on
 * a single request queue.  Exclusion at the request_queue level is obtained
 * via a flag in the request_queue's backing_dev_info.state.
 *
 * Non-request_queue-backed address_spaces will share default_backing_dev_info,
 * unless they implement their own.  Which is somewhat inefficient, as this
 * may prevent concurrent writeback against multiple devices.
 */
int writeback_acquire(struct backing_dev_info *bdi)
{
	return !test_and_set_bit(BDI_pdflush, &bdi->state);
}
/**
 * writeback_in_progress: determine whether there is writeback in progress
 * @bdi: the device's backing_dev_info structure.
 *
 * Determine whether there is writeback in progress against a backing device.
 */
int writeback_in_progress(struct backing_dev_info *bdi)
{
	return test_bit(BDI_pdflush, &bdi->state);
}
/**
 * writeback_release: relinquish exclusive writeback access against a device.
 * @bdi: the device's backing_dev_info structure
 */
void writeback_release(struct backing_dev_info *bdi)
{
	BUG_ON(!writeback_in_progress(bdi));
	clear_bit(BDI_pdflush, &bdi->state);
}
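/*
 * Illustrative usage of the pair above (not part of this file):
 * sync_sb_inodes() brackets its per-queue work with these calls, and any
 * pdflush-style worker is expected to do the same, skipping queues that
 * are already claimed:
 *
 *	if (writeback_acquire(bdi)) {
 *		... write back dirty pages against bdi ...
 *		writeback_release(bdi);
 *	}
 */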