/*
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes. i.e. data writeback. Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002	Andrew Morton
 *		Split out of fs/inode.c
 *		Additions for address_space-based writeback
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include "internal.h"
#define inode_to_bdi(inode)	((inode)->i_mapping->backing_dev_info)
/*
 * We don't actually have pdflush, but this one is exported through /proc...
 */
int nr_pdflush_threads;
/*
 * Passed into wb_writeback(), essentially a subset of writeback_control
 */
struct wb_writeback_work {
	long nr_pages;
	struct super_block *sb;
	enum writeback_sync_modes sync_mode;
	unsigned int for_kupdate:1;
	unsigned int range_cyclic:1;
	unsigned int for_background:1;

	struct list_head list;		/* pending work list */
	struct completion *done;	/* set if the caller waits */
};
/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @bdi: the device's backing_dev_info structure.
 *
 * Determine whether there is writeback waiting to be handled against a
 * backing device.
 */
int writeback_in_progress(struct backing_dev_info *bdi)
{
	return !list_empty(&bdi->work_list);
}
static void bdi_queue_work(struct backing_dev_info *bdi,
		struct wb_writeback_work *work)
{
	spin_lock(&bdi->wb_lock);
	list_add_tail(&work->list, &bdi->work_list);
	spin_unlock(&bdi->wb_lock);

	/*
	 * If the default thread isn't there, make sure we add it. When
	 * it gets created and wakes up, we'll run this work.
	 */
	if (unlikely(list_empty_careful(&bdi->wb_list)))
		wake_up_process(default_backing_dev_info.wb.task);
	else {
		struct bdi_writeback *wb = &bdi->wb;

		if (wb->task)
			wake_up_process(wb->task);
	}
}
static void
__bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
		      bool range_cyclic, bool for_background)
{
	struct wb_writeback_work *work;

	/*
	 * This is WB_SYNC_NONE writeback, so if allocation fails just
	 * wakeup the thread for old dirty data writeback
	 */
	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work) {
		if (bdi->wb.task)
			wake_up_process(bdi->wb.task);
		return;
	}

	work->sync_mode	= WB_SYNC_NONE;
	work->nr_pages	= nr_pages;
	work->range_cyclic = range_cyclic;
	work->for_background = for_background;

	bdi_queue_work(bdi, work);
}
/**
 * bdi_start_writeback - start writeback
 * @bdi: the backing device to write from
 * @nr_pages: the number of pages to write
 *
 * Description:
 *   This does WB_SYNC_NONE opportunistic writeback. The IO is only
 *   started when this function returns, we make no guarantees on
 *   completion. Caller need not hold sb s_umount semaphore.
 */
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages)
{
	__bdi_start_writeback(bdi, nr_pages, true, false);
}
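
/*
 * Editor's sketch (not part of the original file): a caller that wants to
 * opportunistically flush some dirty pages from a device typically has a
 * bdi at hand, e.g. via a super_block, and kicks writeback without
 * waiting. The helper name and the page budget are hypothetical.
 */
static inline void example_kick_writeback(struct super_block *sb)
{
	/* queue WB_SYNC_NONE writeback of up to 1024 pages and return */
	bdi_start_writeback(sb->s_bdi, 1024);
}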
/**
 * bdi_start_background_writeback - start background writeback
 * @bdi: the backing device to write from
 *
 * Description:
 *   This does WB_SYNC_NONE background writeback. The IO is only
 *   started when this function returns, we make no guarantees on
 *   completion. Caller need not hold sb s_umount semaphore.
 */
void bdi_start_background_writeback(struct backing_dev_info *bdi)
{
	__bdi_start_writeback(bdi, LONG_MAX, true, true);
}
/*
 * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
 * furthest end of its superblock's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the b_dirty list.  If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;

	if (!list_empty(&wb->b_dirty)) {
		struct inode *tail;

		tail = list_entry(wb->b_dirty.next, struct inode, i_list);
		if (time_before(inode->dirtied_when, tail->dirtied_when))
			inode->dirtied_when = jiffies;
	}
	list_move(&inode->i_list, &wb->b_dirty);
}
/*
 * requeue inode for re-scanning after bdi->b_io list is exhausted.
 */
static void requeue_io(struct inode *inode)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;

	list_move(&inode->i_list, &wb->b_more_io);
}
static void inode_sync_complete(struct inode *inode)
{
	/*
	 * Prevent speculative execution through spin_unlock(&inode_lock);
	 */
	smp_mb();
	wake_up_bit(&inode->i_state, __I_SYNC);
}
static bool inode_dirtied_after(struct inode *inode, unsigned long t)
{
	bool ret = time_after(inode->dirtied_when, t);
#ifndef CONFIG_64BIT
	/*
	 * For inodes being constantly redirtied, dirtied_when can get stuck.
	 * It _appears_ to be in the future, but is actually in distant past.
	 * This test is necessary to prevent such wrapped-around relative times
	 * from permanently stopping the whole bdi writeback.
	 */
	ret = ret && time_before_eq(inode->dirtied_when, jiffies);
#endif
	return ret;
}
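
/*
 * Editor's illustration of the wraparound logic above (not part of the
 * original file): on 32-bit, if an inode was dirtied just before jiffies
 * wrapped, time_after(dirtied_when, t) can claim the stamp is "in the
 * future"; the extra time_before_eq(dirtied_when, jiffies) check rejects
 * such stale stamps. A hypothetical caller:
 */
static inline bool example_dirtied_recently(struct inode *inode)
{
	/* true only if the stamp is after "5s ago" and not in the future */
	return inode_dirtied_after(inode, jiffies - 5 * HZ);
}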
/*
 * Move expired dirty inodes from @delaying_queue to @dispatch_queue.
 */
static void move_expired_inodes(struct list_head *delaying_queue,
				struct list_head *dispatch_queue,
				unsigned long *older_than_this)
{
	LIST_HEAD(tmp);
	struct list_head *pos, *node;
	struct super_block *sb = NULL;
	struct inode *inode;
	int do_sb_sort = 0;

	while (!list_empty(delaying_queue)) {
		inode = list_entry(delaying_queue->prev, struct inode, i_list);
		if (older_than_this &&
		    inode_dirtied_after(inode, *older_than_this))
			break;
		if (sb && sb != inode->i_sb)
			do_sb_sort = 1;
		sb = inode->i_sb;
		list_move(&inode->i_list, &tmp);
	}

	/* just one sb in list, splice to dispatch_queue and we're done */
	if (!do_sb_sort) {
		list_splice(&tmp, dispatch_queue);
		return;
	}

	/* Move inodes from one superblock together */
	while (!list_empty(&tmp)) {
		inode = list_entry(tmp.prev, struct inode, i_list);
		sb = inode->i_sb;
		list_for_each_prev_safe(pos, node, &tmp) {
			inode = list_entry(pos, struct inode, i_list);
			if (inode->i_sb == sb)
				list_move(&inode->i_list, dispatch_queue);
		}
	}
}
/*
 * Queue all expired dirty inodes for io, eldest first.
 */
static void queue_io(struct bdi_writeback *wb, unsigned long *older_than_this)
{
	list_splice_init(&wb->b_more_io, wb->b_io.prev);
	move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this);
}
static int write_inode(struct inode *inode, struct writeback_control *wbc)
{
	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
		return inode->i_sb->s_op->write_inode(inode, wbc);
	return 0;
}
/*
 * Wait for writeback on an inode to complete.
 */
static void inode_wait_for_writeback(struct inode *inode)
{
	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
	wait_queue_head_t *wqh;

	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	while (inode->i_state & I_SYNC) {
		spin_unlock(&inode_lock);
		__wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
		spin_lock(&inode_lock);
	}
}
/*
 * Write out an inode's dirty pages.  Called under inode_lock.  Either the
 * caller has a ref on the inode (either via __iget or via syscall against
 * an fd) or the inode has I_WILL_FREE set (via generic_forget_inode).
 *
 * If `wbc->sync_mode' is WB_SYNC_ALL, wait on the writeout.
 *
 * The whole writeout design is quite complex and fragile.  We want to avoid
 * starvation of particular inodes when others are being redirtied, prevent
 * livelocks, etc.
 */
static int
writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct address_space *mapping = inode->i_mapping;
	unsigned dirty;
	int ret;

	if (!atomic_read(&inode->i_count))
		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
	else
		WARN_ON(inode->i_state & I_WILL_FREE);

	if (inode->i_state & I_SYNC) {
		/*
		 * If this inode is locked for writeback and we are not doing
		 * writeback-for-data-integrity, move it to b_more_io so that
		 * writeback can proceed with the other inodes on s_io.
		 *
		 * We'll have another go at writing back this inode when we
		 * completed a full scan of b_io.
		 */
		if (wbc->sync_mode != WB_SYNC_ALL) {
			requeue_io(inode);
			return 0;
		}

		/*
		 * It's a data-integrity sync.  We must wait.
		 */
		inode_wait_for_writeback(inode);
	}

	BUG_ON(inode->i_state & I_SYNC);

	/* Set I_SYNC, reset I_DIRTY_PAGES */
	inode->i_state |= I_SYNC;
	inode->i_state &= ~I_DIRTY_PAGES;
	spin_unlock(&inode_lock);

	ret = do_writepages(mapping, wbc);

	/*
	 * Make sure to wait on the data before writing out the metadata.
	 * This is important for filesystems that modify metadata on data
	 * I/O completion.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL) {
		int err = filemap_fdatawait(mapping);
		if (ret == 0)
			ret = err;
	}

	/*
	 * Some filesystems may redirty the inode during the writeback
	 * due to delalloc, clear dirty metadata flags right before
	 * write_inode()
	 */
	spin_lock(&inode_lock);
	dirty = inode->i_state & I_DIRTY;
	inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC);
	spin_unlock(&inode_lock);
	/* Don't write the inode if only I_DIRTY_PAGES was set */
	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		int err = write_inode(inode, wbc);
		if (ret == 0)
			ret = err;
	}

	spin_lock(&inode_lock);
	inode->i_state &= ~I_SYNC;
	if (!(inode->i_state & (I_FREEING | I_CLEAR))) {
		if ((inode->i_state & I_DIRTY_PAGES) && wbc->for_kupdate) {
			/*
			 * More pages get dirtied by a fast dirtier.
			 */
			goto select_queue;
		} else if (inode->i_state & I_DIRTY) {
			/*
			 * At least XFS will redirty the inode during the
			 * writeback (delalloc) and on io completion (isize).
			 */
			redirty_tail(inode);
		} else if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
			/*
			 * We didn't write back all the pages.  nfs_writepages()
			 * sometimes bails out without doing anything.  Redirty
			 * the inode; Move it from b_io onto b_more_io/b_dirty.
			 */
			/*
			 * akpm: if the caller was the kupdate function we put
			 * this inode at the head of b_dirty so it gets first
			 * consideration.  Otherwise, move it to the tail, for
			 * the reasons described there.  I'm not really sure
			 * how much sense this makes.  Presumably I had good
			 * reasons for doing it this way, and I'd rather not
			 * muck with it at present.
			 */
			if (wbc->for_kupdate) {
				/*
				 * For the kupdate function we move the inode
				 * to b_more_io so it will get more writeout as
				 * soon as the queue becomes uncongested.
				 */
				inode->i_state |= I_DIRTY_PAGES;
select_queue:
				if (wbc->nr_to_write <= 0) {
					/*
					 * slice used up: queue for next turn
					 */
					requeue_io(inode);
				} else {
					/*
					 * somehow blocked: retry later
					 */
					redirty_tail(inode);
				}
			} else {
				/*
				 * Otherwise fully redirty the inode so that
				 * other inodes on this superblock will get some
				 * writeout.  Otherwise heavy writing to one
				 * file would indefinitely suspend writeout of
				 * all the other files.
				 */
				inode->i_state |= I_DIRTY_PAGES;
				redirty_tail(inode);
			}
		} else if (atomic_read(&inode->i_count)) {
			/*
			 * The inode is clean, inuse
			 */
			list_move(&inode->i_list, &inode_in_use);
		} else {
			/*
			 * The inode is clean, unused
			 */
			list_move(&inode->i_list, &inode_unused);
		}
	}
	inode_sync_complete(inode);
	return ret;
}
/*
 * For background writeback the caller does not have the sb pinned
 * before calling writeback. So make sure that we do pin it, so it doesn't
 * go away while we are writing inodes from it.
 */
static bool pin_sb_for_writeback(struct super_block *sb)
{
	spin_lock(&sb_lock);
	if (list_empty(&sb->s_instances)) {
		spin_unlock(&sb_lock);
		return false;
	}

	sb->s_count++;
	spin_unlock(&sb_lock);

	if (down_read_trylock(&sb->s_umount)) {
		if (sb->s_root)
			return true;
		up_read(&sb->s_umount);
	}

	put_super(sb);
	return false;
}
/*
 * Write a portion of b_io inodes which belong to @sb.
 *
 * If @only_this_sb is true, then find and write all such
 * inodes. Otherwise write only ones which go sequentially
 * in reverse order.
 *
 * Return 1, if the caller writeback routine should be
 * interrupted. Otherwise return 0.
 */
static int writeback_sb_inodes(struct super_block *sb, struct bdi_writeback *wb,
		struct writeback_control *wbc, bool only_this_sb)
{
	while (!list_empty(&wb->b_io)) {
		long pages_skipped;
		struct inode *inode = list_entry(wb->b_io.prev,
						 struct inode, i_list);

		if (inode->i_sb != sb) {
			if (only_this_sb) {
				/*
				 * We only want to write back data for this
				 * superblock, move all inodes not belonging
				 * to it back onto the dirty list.
				 */
				redirty_tail(inode);
				continue;
			}

			/*
			 * The inode belongs to a different superblock.
			 * Bounce back to the caller to unpin this and
			 * pin the next superblock.
			 */
			return 0;
		}

		if (inode->i_state & (I_NEW | I_WILL_FREE)) {
			requeue_io(inode);
			continue;
		}

		/*
		 * Was this inode dirtied after sync_sb_inodes was called?
		 * This keeps sync from extra jobs and livelock.
		 */
		if (inode_dirtied_after(inode, wbc->wb_start))
			return 1;

		BUG_ON(inode->i_state & (I_FREEING | I_CLEAR));
		__iget(inode);
		pages_skipped = wbc->pages_skipped;
		writeback_single_inode(inode, wbc);
		if (wbc->pages_skipped != pages_skipped) {
			/*
			 * writeback is not making progress due to locked
			 * buffers.  Skip this inode for now.
			 */
			redirty_tail(inode);
		}
		spin_unlock(&inode_lock);
		iput(inode);
		cond_resched();
		spin_lock(&inode_lock);
		if (wbc->nr_to_write <= 0) {
			wbc->more_io = 1;
			return 1;
		}
		if (!list_empty(&wb->b_more_io))
			wbc->more_io = 1;
	}
	/* b_io is empty */
	return 1;
}
void writeback_inodes_wb(struct bdi_writeback *wb,
		struct writeback_control *wbc)
{
	int ret = 0;

	wbc->wb_start = jiffies; /* livelock avoidance */
	spin_lock(&inode_lock);
	if (!wbc->for_kupdate || list_empty(&wb->b_io))
		queue_io(wb, wbc->older_than_this);

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = list_entry(wb->b_io.prev,
						 struct inode, i_list);
		struct super_block *sb = inode->i_sb;

		if (!pin_sb_for_writeback(sb)) {
			requeue_io(inode);
			continue;
		}
		ret = writeback_sb_inodes(sb, wb, wbc, false);
		drop_super(sb);

		if (ret)
			break;
	}
	spin_unlock(&inode_lock);
	/* Leave any unwritten inodes on b_io */
}
static void __writeback_inodes_sb(struct super_block *sb,
		struct bdi_writeback *wb, struct writeback_control *wbc)
{
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	wbc->wb_start = jiffies; /* livelock avoidance */
	spin_lock(&inode_lock);
	if (!wbc->for_kupdate || list_empty(&wb->b_io))
		queue_io(wb, wbc->older_than_this);
	writeback_sb_inodes(sb, wb, wbc, true);
	spin_unlock(&inode_lock);
}
/*
 * The maximum number of pages to writeout in a single bdi flush/kupdate
 * operation.  We do this so we don't hold I_SYNC against an inode for
 * enormous amounts of time, which would block a userspace task which has
 * been forced to throttle against that inode.  Also, the code re-evaluates
 * the dirty thresholds each time it has written this many pages.
 */
#define MAX_WRITEBACK_PAGES	1024
static inline bool over_bground_thresh(void)
{
	unsigned long background_thresh, dirty_thresh;

	get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);

	return (global_page_state(NR_FILE_DIRTY) +
		global_page_state(NR_UNSTABLE_NFS) >= background_thresh);
}
/*
 * Explicit flushing or periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space.  So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval.  But if a writeback event
 * takes longer than a dirty_writeback_interval interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static long wb_writeback(struct bdi_writeback *wb,
			 struct wb_writeback_work *work)
{
	struct writeback_control wbc = {
		.sync_mode		= work->sync_mode,
		.older_than_this	= NULL,
		.for_kupdate		= work->for_kupdate,
		.for_background		= work->for_background,
		.range_cyclic		= work->range_cyclic,
	};
	unsigned long oldest_jif;
	long wrote = 0;
	struct inode *inode;

	if (wbc.for_kupdate) {
		wbc.older_than_this = &oldest_jif;
		oldest_jif = jiffies -
				msecs_to_jiffies(dirty_expire_interval * 10);
	}
	if (!wbc.range_cyclic) {
		wbc.range_start = 0;
		wbc.range_end = LLONG_MAX;
	}

	for (;;) {
		/*
		 * Stop writeback when nr_pages has been consumed
		 */
		if (work->nr_pages <= 0)
			break;

		/*
		 * For background writeout, stop when we are below the
		 * background dirty threshold
		 */
		if (work->for_background && !over_bground_thresh())
			break;

		wbc.more_io = 0;
		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
		wbc.pages_skipped = 0;
		if (work->sb)
			__writeback_inodes_sb(work->sb, wb, &wbc);
		else
			writeback_inodes_wb(wb, &wbc);
		work->nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
		wrote += MAX_WRITEBACK_PAGES - wbc.nr_to_write;

		/*
		 * If we consumed everything, see if we have more
		 */
		if (wbc.nr_to_write <= 0)
			continue;
		/*
		 * Didn't write everything and we don't have more IO, bail
		 */
		if (!wbc.more_io)
			break;
		/*
		 * Did we write something? Try for more
		 */
		if (wbc.nr_to_write < MAX_WRITEBACK_PAGES)
			continue;
		/*
		 * Nothing written. Wait for some inode to
		 * become available for writeback. Otherwise
		 * we'll just busyloop.
		 */
		spin_lock(&inode_lock);
		if (!list_empty(&wb->b_more_io)) {
			inode = list_entry(wb->b_more_io.prev,
						struct inode, i_list);
			inode_wait_for_writeback(inode);
		}
		spin_unlock(&inode_lock);
	}

	return wrote;
}
/*
 * Return the next wb_writeback_work struct that hasn't been processed yet.
 */
static struct wb_writeback_work *
get_next_work_item(struct backing_dev_info *bdi, struct bdi_writeback *wb)
{
	struct wb_writeback_work *work = NULL;

	spin_lock(&bdi->wb_lock);
	if (!list_empty(&bdi->work_list)) {
		work = list_entry(bdi->work_list.next,
				  struct wb_writeback_work, list);
		list_del_init(&work->list);
	}
	spin_unlock(&bdi->wb_lock);

	return work;
}
static long wb_check_old_data_flush(struct bdi_writeback *wb)
{
	unsigned long expired;
	long nr_pages;

	/*
	 * When set to zero, disable periodic writeback
	 */
	if (!dirty_writeback_interval)
		return 0;

	expired = wb->last_old_flush +
			msecs_to_jiffies(dirty_writeback_interval * 10);
	if (time_before(jiffies, expired))
		return 0;

	wb->last_old_flush = jiffies;
	nr_pages = global_page_state(NR_FILE_DIRTY) +
			global_page_state(NR_UNSTABLE_NFS) +
			(inodes_stat.nr_inodes - inodes_stat.nr_unused);

	if (nr_pages) {
		struct wb_writeback_work work = {
			.nr_pages	= nr_pages,
			.sync_mode	= WB_SYNC_NONE,
			.for_kupdate	= 1,
			.range_cyclic	= 1,
		};

		return wb_writeback(wb, &work);
	}

	return 0;
}
/*
 * Retrieve work items and do the writeback they describe
 */
long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
{
	struct backing_dev_info *bdi = wb->bdi;
	struct wb_writeback_work *work;
	long wrote = 0;

	while ((work = get_next_work_item(bdi, wb)) != NULL) {
		/*
		 * Override sync mode, in case we must wait for completion
		 * because this thread is exiting now.
		 */
		if (force_wait)
			work->sync_mode = WB_SYNC_ALL;

		wrote += wb_writeback(wb, work);

		/*
		 * Notify the caller of completion if this is a synchronous
		 * work item, otherwise just free it.
		 */
		if (work->done)
			complete(work->done);
		else
			kfree(work);
	}

	/*
	 * Check for periodic writeback, kupdated() style
	 */
	wrote += wb_check_old_data_flush(wb);

	return wrote;
}
/*
 * Handle writeback of dirty data for the device backed by this bdi. Also
 * wakes up periodically and does kupdated style flushing.
 */
int bdi_writeback_task(struct bdi_writeback *wb)
{
	unsigned long last_active = jiffies;
	unsigned long wait_jiffies = -1UL;
	long pages_written;

	while (!kthread_should_stop()) {
		pages_written = wb_do_writeback(wb, 0);

		if (pages_written)
			last_active = jiffies;
		else if (wait_jiffies != -1UL) {
			unsigned long max_idle;

			/*
			 * Longest period of inactivity that we tolerate. If we
			 * see dirty data again later, the task will get
			 * recreated automatically.
			 */
			max_idle = max(5UL * 60 * HZ, wait_jiffies);
			if (time_after(jiffies, max_idle + last_active))
				break;
		}

		if (dirty_writeback_interval) {
			wait_jiffies = msecs_to_jiffies(dirty_writeback_interval * 10);
			schedule_timeout_interruptible(wait_jiffies);
		} else {
			set_current_state(TASK_INTERRUPTIBLE);
			if (list_empty_careful(&wb->bdi->work_list) &&
			    !kthread_should_stop())
				schedule();
			__set_current_state(TASK_RUNNING);
		}

		try_to_freeze();
	}

	return 0;
}
/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.
 */
void wakeup_flusher_threads(long nr_pages)
{
	struct backing_dev_info *bdi;

	if (!nr_pages) {
		nr_pages = global_page_state(NR_FILE_DIRTY) +
				global_page_state(NR_UNSTABLE_NFS);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
		if (!bdi_has_dirty_io(bdi))
			continue;
		__bdi_start_writeback(bdi, nr_pages, false, false);
	}
	rcu_read_unlock();
}
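
/*
 * Editor's sketch (not part of the original file): passing zero asks every
 * flusher thread to write back all currently dirty pages, which is how
 * callers on the sync path use this. The helper name is hypothetical.
 */
static inline void example_flush_everything(void)
{
	/* nr_pages == 0 means "write back the whole world" */
	wakeup_flusher_threads(0);
}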
static noinline void block_dump___mark_inode_dirty(struct inode *inode)
{
	if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
		struct dentry *dentry;
		const char *name = "?";

		dentry = d_find_alias(inode);
		if (dentry) {
			spin_lock(&dentry->d_lock);
			name = (const char *) dentry->d_name.name;
		}
		printk(KERN_DEBUG
		       "%s(%d): dirtied inode %lu (%s) on %s\n",
		       current->comm, task_pid_nr(current), inode->i_ino,
		       name, inode->i_sb->s_id);
		if (dentry) {
			spin_unlock(&dentry->d_lock);
			dput(dentry);
		}
	}
}
/**
 * __mark_inode_dirty - internal function
 * @inode: inode to mark
 * @flags: what kind of dirty (i.e. I_DIRTY_SYNC)
 *
 * Mark an inode as dirty. Callers should use mark_inode_dirty or
 * mark_inode_dirty_sync.
 *
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
 *
 * This function *must* be atomic for the I_DIRTY_PAGES case -
 * set_page_dirty() is called under spinlock in several places.
 *
 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
 * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
 * the kernel-internal blockdev inode represents the dirtying time of the
 * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
 * page->mapping->host, so the page-dirtying time is recorded in the internal
 * blockdev inode.
 */
void __mark_inode_dirty(struct inode *inode, int flags)
{
	struct super_block *sb = inode->i_sb;

	/*
	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
	 * dirty the inode itself
	 */
	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		if (sb->s_op->dirty_inode)
			sb->s_op->dirty_inode(inode);
	}

	/*
	 * make sure that changes are seen by all cpus before we test i_state
	 */
	smp_mb();

	/* avoid the locking if we can */
	if ((inode->i_state & flags) == flags)
		return;

	if (unlikely(block_dump))
		block_dump___mark_inode_dirty(inode);

	spin_lock(&inode_lock);
	if ((inode->i_state & flags) != flags) {
		const int was_dirty = inode->i_state & I_DIRTY;

		inode->i_state |= flags;

		/*
		 * If the inode is being synced, just update its dirty state.
		 * The unlocker will place the inode on the appropriate
		 * superblock list, based upon its state.
		 */
		if (inode->i_state & I_SYNC)
			goto out;

		/*
		 * Only add valid (hashed) inodes to the superblock's
		 * dirty list.  Add blockdev inodes as well.
		 */
		if (!S_ISBLK(inode->i_mode)) {
			if (hlist_unhashed(&inode->i_hash))
				goto out;
		}
		if (inode->i_state & (I_FREEING|I_CLEAR))
			goto out;

		/*
		 * If the inode was already on b_dirty/b_io/b_more_io, don't
		 * reposition it (that would break b_dirty time-ordering).
		 */
		if (!was_dirty) {
			struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
			struct backing_dev_info *bdi = wb->bdi;

			if (bdi_cap_writeback_dirty(bdi) &&
			    !test_bit(BDI_registered, &bdi->state)) {
				WARN_ON(1);
				printk(KERN_ERR "bdi-%s not registered\n",
					bdi->name);
			}

			inode->dirtied_when = jiffies;
			list_move(&inode->i_list, &wb->b_dirty);
		}
	}
out:
	spin_unlock(&inode_lock);
}
EXPORT_SYMBOL(__mark_inode_dirty);
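
/*
 * Editor's sketch (not part of the original file): filesystems normally go
 * through the mark_inode_dirty()/mark_inode_dirty_sync() wrappers from
 * <linux/fs.h>, which expand to __mark_inode_dirty() with the right flags.
 * The helper name below is hypothetical.
 */
static inline void example_touch_inode(struct inode *inode)
{
	inode->i_mtime = CURRENT_TIME;
	/* equivalent to __mark_inode_dirty(inode, I_DIRTY_SYNC) */
	mark_inode_dirty_sync(inode);
}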
/*
 * Write out a superblock's list of dirty inodes.  A wait will be performed
 * upon no inodes, all inodes or the final one, depending upon sync_mode.
 *
 * If older_than_this is non-NULL, then only write out inodes which
 * had their first dirtying at a time earlier than *older_than_this.
 *
 * If `bdi' is non-zero then we're being asked to writeback a specific queue.
 * This function assumes that the blockdev superblock's inodes are backed by
 * a variety of queues, so all inodes are searched.  For other superblocks,
 * assume that all inodes are backed by the same queue.
 *
 * The inodes to be written are parked on bdi->b_io.  They are moved back onto
 * bdi->b_dirty as they are selected for writing.  This way, none can be missed
 * on the writer throttling path, and we get decent balancing between many
 * throttled threads: we don't want them all piling up on inode_sync_wait.
 */
static void wait_sb_inodes(struct super_block *sb)
{
	struct inode *inode, *old_inode = NULL;

	/*
	 * We need to be protected against the filesystem going from
	 * r/o to r/w or vice versa.
	 */
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	spin_lock(&inode_lock);

	/*
	 * Data integrity sync. Must wait for all pages under writeback,
	 * because there may have been pages dirtied before our sync
	 * call, but whose writeout had already started before we got to
	 * them. In that case the inode may not be on the dirty list, but
	 * we still have to wait for that writeout.
	 */
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		struct address_space *mapping;

		if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW))
			continue;
		mapping = inode->i_mapping;
		if (mapping->nrpages == 0)
			continue;
		__iget(inode);
		spin_unlock(&inode_lock);
		/*
		 * We hold a reference to 'inode' so it couldn't have
		 * been removed from s_inodes list while we dropped the
		 * inode_lock.  We cannot iput the inode now as we can
		 * be holding the last reference and we cannot iput it
		 * under inode_lock. So we keep the reference and iput
		 * it later.
		 */
		iput(old_inode);
		old_inode = inode;

		filemap_fdatawait(mapping);

		cond_resched();

		spin_lock(&inode_lock);
	}
	spin_unlock(&inode_lock);
	iput(old_inode);
}
/**
 * writeback_inodes_sb - writeback dirty inodes from given super_block
 * @sb: the superblock
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb(struct super_block *sb)
{
	unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
	unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
	DECLARE_COMPLETION_ONSTACK(done);
	struct wb_writeback_work work = {
		.sb		= sb,
		.sync_mode	= WB_SYNC_NONE,
		.done		= &done,
	};

	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	work.nr_pages = nr_dirty + nr_unstable +
			(inodes_stat.nr_inodes - inodes_stat.nr_unused);

	bdi_queue_work(sb->s_bdi, &work);
	wait_for_completion(&done);
}
EXPORT_SYMBOL(writeback_inodes_sb);
/**
 * writeback_inodes_sb_if_idle - start writeback if none underway
 * @sb: the superblock
 *
 * Invoke writeback_inodes_sb if no writeback is currently underway.
 * Returns 1 if writeback was started, 0 if not.
 */
int writeback_inodes_sb_if_idle(struct super_block *sb)
{
	if (!writeback_in_progress(sb->s_bdi)) {
		down_read(&sb->s_umount);
		writeback_inodes_sb(sb);
		up_read(&sb->s_umount);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(writeback_inodes_sb_if_idle);
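
/*
 * Editor's sketch (not part of the original file): a filesystem running low
 * on space can nudge writeback this way without queueing up behind a flush
 * that is already running. The helper name is hypothetical.
 */
static inline int example_nudge_writeback(struct super_block *sb)
{
	/* returns 1 if it kicked writeback, 0 if one was already running */
	return writeback_inodes_sb_if_idle(sb);
}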
/**
 * sync_inodes_sb - sync sb inode pages
 * @sb: the superblock
 *
 * This function writes and waits on any dirty inode belonging to this
 * super_block.
 */
void sync_inodes_sb(struct super_block *sb)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct wb_writeback_work work = {
		.sb		= sb,
		.sync_mode	= WB_SYNC_ALL,
		.nr_pages	= LONG_MAX,
		.range_cyclic	= 0,
		.done		= &done,
	};

	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	bdi_queue_work(sb->s_bdi, &work);
	wait_for_completion(&done);

	wait_sb_inodes(sb);
}
EXPORT_SYMBOL(sync_inodes_sb);
/**
 * write_inode_now - write an inode to disk
 * @inode: inode to write to disk
 * @sync: whether the write should be synchronous or not
 *
 * This function commits an inode to disk immediately if it is dirty. This is
 * primarily needed by knfsd.
 *
 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
 */
int write_inode_now(struct inode *inode, int sync)
{
	int ret;
	struct writeback_control wbc = {
		.nr_to_write = LONG_MAX,
		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	if (!mapping_cap_writeback_dirty(inode->i_mapping))
		wbc.nr_to_write = 0;

	might_sleep();
	spin_lock(&inode_lock);
	ret = writeback_single_inode(inode, &wbc);
	spin_unlock(&inode_lock);
	if (sync)
		inode_sync_wait(inode);
	return ret;
}
EXPORT_SYMBOL(write_inode_now);
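
/*
 * Editor's sketch (not part of the original file): knfsd-style usage,
 * committing an inode synchronously before replying to a client. The
 * helper name is hypothetical.
 */
static inline int example_commit_inode(struct inode *inode)
{
	/* sync == 1: WB_SYNC_ALL, wait for data and inode writeout */
	return write_inode_now(inode, 1);
}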
/**
 * sync_inode - write an inode and its pages to disk.
 * @inode: the inode to sync
 * @wbc: controls the writeback mode
 *
 * sync_inode() will write an inode and its pages to disk.  It will also
 * correctly update the inode on its superblock's dirty inode lists and will
 * update inode->i_state.
 *
 * The caller must have a ref on the inode.
 */
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
	int ret;

	spin_lock(&inode_lock);
	ret = writeback_single_inode(inode, wbc);
	spin_unlock(&inode_lock);
	return ret;
}
EXPORT_SYMBOL(sync_inode);
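
/*
 * Editor's sketch (not part of the original file): a caller of sync_inode()
 * supplies its own writeback_control, e.g. to write at most a few pages
 * without waiting. The helper name and the page budget are hypothetical.
 */
static inline int example_sync_inode_some(struct inode *inode)
{
	struct writeback_control wbc = {
		.sync_mode	= WB_SYNC_NONE,
		.nr_to_write	= 16,	/* hypothetical small budget */
	};

	return sync_inode(inode, &wbc);
}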