writeback: fix WB_SYNC_NONE writeback from umount
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / fs / fs-writeback.c
blob 67db89786e7dae77cfdb3bbbc9092e7892393354
/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes.  ie: data writeback.  Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002	Andrew Morton
 *		Split out of fs/inode.c
 *		Additions for address_space-based writeback
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include "internal.h"

#define inode_to_bdi(inode)	((inode)->i_mapping->backing_dev_info)

/*
 * We don't actually have pdflush, but this one is exported through /proc...
 */
int nr_pdflush_threads;

/*
 * Passed into wb_writeback(), essentially a subset of writeback_control
 */
struct wb_writeback_args {
	long nr_pages;
	struct super_block *sb;
	enum writeback_sync_modes sync_mode;
	int for_kupdate:1;
	int range_cyclic:1;
	int for_background:1;
	int sb_pinned:1;
};

/*
 * Work items for the bdi_writeback threads
 */
struct bdi_work {
	struct list_head list;		/* pending work list */
	struct rcu_head rcu_head;	/* for RCU free/clear of work */

	unsigned long seen;		/* threads that have seen this work */
	atomic_t pending;		/* number of threads still to do work */

	struct wb_writeback_args args;	/* writeback arguments */

	unsigned long state;		/* flag bits, see WS_* */
};

enum {
	WS_USED_B = 0,
	WS_ONSTACK_B,
};

#define WS_USED (1 << WS_USED_B)
#define WS_ONSTACK (1 << WS_ONSTACK_B)

static inline bool bdi_work_on_stack(struct bdi_work *work)
{
	return test_bit(WS_ONSTACK_B, &work->state);
}

static inline void bdi_work_init(struct bdi_work *work,
				 struct wb_writeback_args *args)
{
	INIT_RCU_HEAD(&work->rcu_head);
	work->args = *args;
	work->state = WS_USED;
}

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @bdi: the device's backing_dev_info structure.
 *
 * Determine whether there is writeback waiting to be handled against a
 * backing device.
 */
int writeback_in_progress(struct backing_dev_info *bdi)
{
	return !list_empty(&bdi->work_list);
}

static void bdi_work_clear(struct bdi_work *work)
{
	clear_bit(WS_USED_B, &work->state);
	smp_mb__after_clear_bit();
	/*
	 * work can have disappeared at this point. bit waitq functions
	 * should be able to tolerate this, provided bdi_sched_wait does
	 * not dereference its pointer argument.
	 */
	wake_up_bit(&work->state, WS_USED_B);
}

static void bdi_work_free(struct rcu_head *head)
{
	struct bdi_work *work = container_of(head, struct bdi_work, rcu_head);

	if (!bdi_work_on_stack(work))
		kfree(work);
	else
		bdi_work_clear(work);
}

static void wb_work_complete(struct bdi_work *work)
{
	const enum writeback_sync_modes sync_mode = work->args.sync_mode;
	int onstack = bdi_work_on_stack(work);

	/*
	 * For allocated work, we can clear the done/seen bit right here.
	 * For on-stack work, we need to postpone both the clear and free
	 * to after the RCU grace period, since the stack could be invalidated
	 * as soon as bdi_work_clear() has done the wakeup.
	 */
	if (!onstack)
		bdi_work_clear(work);
	if (sync_mode == WB_SYNC_NONE || onstack)
		call_rcu(&work->rcu_head, bdi_work_free);
}

static void wb_clear_pending(struct bdi_writeback *wb, struct bdi_work *work)
{
	/*
	 * The caller has retrieved the work arguments from this work,
	 * drop our reference. If this is the last ref, delete and free it
	 */
	if (atomic_dec_and_test(&work->pending)) {
		struct backing_dev_info *bdi = wb->bdi;

		spin_lock(&bdi->wb_lock);
		list_del_rcu(&work->list);
		spin_unlock(&bdi->wb_lock);

		wb_work_complete(work);
	}
}

static void bdi_queue_work(struct backing_dev_info *bdi, struct bdi_work *work)
{
	work->seen = bdi->wb_mask;
	BUG_ON(!work->seen);
	atomic_set(&work->pending, bdi->wb_cnt);
	BUG_ON(!bdi->wb_cnt);

	/*
	 * list_add_tail_rcu() contains the necessary barriers to
	 * make sure the above stores are seen before the item is
	 * noticed on the list
	 */
	spin_lock(&bdi->wb_lock);
	list_add_tail_rcu(&work->list, &bdi->work_list);
	spin_unlock(&bdi->wb_lock);

	/*
	 * If the default thread isn't there, make sure we add it. When
	 * it gets created and wakes up, we'll run this work.
	 */
	if (unlikely(list_empty_careful(&bdi->wb_list)))
		wake_up_process(default_backing_dev_info.wb.task);
	else {
		struct bdi_writeback *wb = &bdi->wb;

		if (wb->task)
			wake_up_process(wb->task);
	}
}

/*
 * Used for on-stack allocated work items. The caller needs to wait until
 * the wb threads have acked the work before it's safe to continue.
 */
static void bdi_wait_on_work_clear(struct bdi_work *work)
{
	wait_on_bit(&work->state, WS_USED_B, bdi_sched_wait,
		    TASK_UNINTERRUPTIBLE);
}

static void bdi_alloc_queue_work(struct backing_dev_info *bdi,
				 struct wb_writeback_args *args)
{
	struct bdi_work *work;

	/*
	 * This is WB_SYNC_NONE writeback, so if allocation fails just
	 * wakeup the thread for old dirty data writeback
	 */
	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		bdi_work_init(work, args);
		bdi_queue_work(bdi, work);
	} else {
		struct bdi_writeback *wb = &bdi->wb;

		if (wb->task)
			wake_up_process(wb->task);
	}
}

/**
 * bdi_sync_writeback - start and wait for writeback
 * @bdi: the backing device to write from
 * @sb: write inodes from this super_block
 *
 * Description:
 *   This does WB_SYNC_ALL data integrity writeback and waits for the
 *   IO to complete. Callers must hold the sb s_umount semaphore for
 *   reading, to avoid having the super disappear before we are done.
 */
static void bdi_sync_writeback(struct backing_dev_info *bdi,
			       struct super_block *sb)
{
	struct wb_writeback_args args = {
		.sb		= sb,
		.sync_mode	= WB_SYNC_ALL,
		.nr_pages	= LONG_MAX,
		.range_cyclic	= 0,
		/*
		 * Setting sb_pinned is not necessary for WB_SYNC_ALL, but
		 * let's make it explicitly clear.
		 */
		.sb_pinned	= 1,
	};
	struct bdi_work work;

	bdi_work_init(&work, &args);
	work.state |= WS_ONSTACK;

	bdi_queue_work(bdi, &work);
	bdi_wait_on_work_clear(&work);
}

/**
 * bdi_start_writeback - start writeback
 * @bdi: the backing device to write from
 * @sb: write inodes from this super_block
 * @nr_pages: the number of pages to write
 * @sb_locked: caller already holds sb umount sem.
 *
 * Description:
 *   This does WB_SYNC_NONE opportunistic writeback. The IO is only
 *   started when this function returns, we make no guarantees on
 *   completion. Caller specifies whether sb umount sem is held already or not.
 *
 */
void bdi_start_writeback(struct backing_dev_info *bdi, struct super_block *sb,
			 long nr_pages, int sb_locked)
{
	struct wb_writeback_args args = {
		.sb		= sb,
		.sync_mode	= WB_SYNC_NONE,
		.nr_pages	= nr_pages,
		.range_cyclic	= 1,
		.sb_pinned	= sb_locked,
	};

	/*
	 * We treat @nr_pages=0 as the special case to do background writeback,
	 * ie. to sync pages until the background dirty threshold is reached.
	 */
	if (!nr_pages) {
		args.nr_pages = LONG_MAX;
		args.for_background = 1;
	}

	bdi_alloc_queue_work(bdi, &args);
}

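/*
 * Usage sketch (illustrative only): a caller that wants opportunistic,
 * non-blocking writeout of roughly nr_pages pages from a bdi, and that
 * does not hold the sb umount sem, might do
 *
 *	bdi_start_writeback(bdi, NULL, nr_pages, 0);
 *
 * while passing nr_pages == 0 instead requests background writeback until
 * the dirty threshold is reached, as described above.
 */
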
/*
 * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
 * furthest end of its superblock's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the b_dirty list.  If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;

	if (!list_empty(&wb->b_dirty)) {
		struct inode *tail;

		tail = list_entry(wb->b_dirty.next, struct inode, i_list);
		if (time_before(inode->dirtied_when, tail->dirtied_when))
			inode->dirtied_when = jiffies;
	}
	list_move(&inode->i_list, &wb->b_dirty);
}

/*
 * requeue inode for re-scanning after bdi->b_io list is exhausted.
 */
static void requeue_io(struct inode *inode)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;

	list_move(&inode->i_list, &wb->b_more_io);
}

static void inode_sync_complete(struct inode *inode)
{
	/*
	 * Prevent speculative execution through spin_unlock(&inode_lock);
	 */
	smp_mb();
	wake_up_bit(&inode->i_state, __I_SYNC);
}

static bool inode_dirtied_after(struct inode *inode, unsigned long t)
{
	bool ret = time_after(inode->dirtied_when, t);
#ifndef CONFIG_64BIT
	/*
	 * For inodes being constantly redirtied, dirtied_when can get stuck.
	 * It _appears_ to be in the future, but is actually in distant past.
	 * This test is necessary to prevent such wrapped-around relative times
	 * from permanently stopping the whole bdi writeback.
	 */
	ret = ret && time_before_eq(inode->dirtied_when, jiffies);
#endif
	return ret;
}

/*
 * Move expired dirty inodes from @delaying_queue to @dispatch_queue.
 */
static void move_expired_inodes(struct list_head *delaying_queue,
				struct list_head *dispatch_queue,
				unsigned long *older_than_this)
{
	LIST_HEAD(tmp);
	struct list_head *pos, *node;
	struct super_block *sb = NULL;
	struct inode *inode;
	int do_sb_sort = 0;

	while (!list_empty(delaying_queue)) {
		inode = list_entry(delaying_queue->prev, struct inode, i_list);
		if (older_than_this &&
		    inode_dirtied_after(inode, *older_than_this))
			break;
		if (sb && sb != inode->i_sb)
			do_sb_sort = 1;
		sb = inode->i_sb;
		list_move(&inode->i_list, &tmp);
	}

	/* just one sb in list, splice to dispatch_queue and we're done */
	if (!do_sb_sort) {
		list_splice(&tmp, dispatch_queue);
		return;
	}

	/* Move inodes from one superblock together */
	while (!list_empty(&tmp)) {
		inode = list_entry(tmp.prev, struct inode, i_list);
		sb = inode->i_sb;
		list_for_each_prev_safe(pos, node, &tmp) {
			inode = list_entry(pos, struct inode, i_list);
			if (inode->i_sb == sb)
				list_move(&inode->i_list, dispatch_queue);
		}
	}
}

/*
 * Queue all expired dirty inodes for io, eldest first.
 */
static void queue_io(struct bdi_writeback *wb, unsigned long *older_than_this)
{
	list_splice_init(&wb->b_more_io, wb->b_io.prev);
	move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this);
}

static int write_inode(struct inode *inode, struct writeback_control *wbc)
{
	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
		return inode->i_sb->s_op->write_inode(inode, wbc);
	return 0;
}

/*
 * Wait for writeback on an inode to complete.
 */
static void inode_wait_for_writeback(struct inode *inode)
{
	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
	wait_queue_head_t *wqh;

	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	do {
		spin_unlock(&inode_lock);
		__wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
		spin_lock(&inode_lock);
	} while (inode->i_state & I_SYNC);
}

/*
 * Write out an inode's dirty pages.  Called under inode_lock.  Either the
 * caller has ref on the inode (either via __iget or via syscall against an fd)
 * or the inode has I_WILL_FREE set (via generic_forget_inode)
 *
 * If `wait' is set, wait on the writeout.
 *
 * The whole writeout design is quite complex and fragile.  We want to avoid
 * starvation of particular inodes when others are being redirtied, prevent
 * livelocks, etc.
 *
 * Called under inode_lock.
 */
static int
writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct address_space *mapping = inode->i_mapping;
	unsigned dirty;
	int ret;

	if (!atomic_read(&inode->i_count))
		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
	else
		WARN_ON(inode->i_state & I_WILL_FREE);

	if (inode->i_state & I_SYNC) {
		/*
		 * If this inode is locked for writeback and we are not doing
		 * writeback-for-data-integrity, move it to b_more_io so that
		 * writeback can proceed with the other inodes on s_io.
		 *
		 * We'll have another go at writing back this inode when we
		 * completed a full scan of b_io.
		 */
		if (wbc->sync_mode != WB_SYNC_ALL) {
			requeue_io(inode);
			return 0;
		}

		/*
		 * It's a data-integrity sync.  We must wait.
		 */
		inode_wait_for_writeback(inode);
	}

	BUG_ON(inode->i_state & I_SYNC);

	/* Set I_SYNC, reset I_DIRTY */
	dirty = inode->i_state & I_DIRTY;
	inode->i_state |= I_SYNC;
	inode->i_state &= ~I_DIRTY;

	spin_unlock(&inode_lock);

	ret = do_writepages(mapping, wbc);

	/*
	 * Make sure to wait on the data before writing out the metadata.
	 * This is important for filesystems that modify metadata on data
	 * I/O completion.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL) {
		int err = filemap_fdatawait(mapping);
		if (ret == 0)
			ret = err;
	}

	/* Don't write the inode if only I_DIRTY_PAGES was set */
	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		int err = write_inode(inode, wbc);
		if (ret == 0)
			ret = err;
	}

	spin_lock(&inode_lock);
	inode->i_state &= ~I_SYNC;
	if (!(inode->i_state & (I_FREEING | I_CLEAR))) {
		if ((inode->i_state & I_DIRTY_PAGES) && wbc->for_kupdate) {
			/*
			 * More pages get dirtied by a fast dirtier.
			 */
			goto select_queue;
		} else if (inode->i_state & I_DIRTY) {
			/*
			 * At least XFS will redirty the inode during the
			 * writeback (delalloc) and on io completion (isize).
			 */
			redirty_tail(inode);
		} else if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
			/*
			 * We didn't write back all the pages.  nfs_writepages()
			 * sometimes bales out without doing anything. Redirty
			 * the inode; Move it from b_io onto b_more_io/b_dirty.
			 */
			/*
			 * akpm: if the caller was the kupdate function we put
			 * this inode at the head of b_dirty so it gets first
			 * consideration.  Otherwise, move it to the tail, for
			 * the reasons described there.  I'm not really sure
			 * how much sense this makes.  Presumably I had a good
			 * reason for doing it this way, and I'd rather not
			 * muck with it at present.
			 */
			if (wbc->for_kupdate) {
				/*
				 * For the kupdate function we move the inode
				 * to b_more_io so it will get more writeout as
				 * soon as the queue becomes uncongested.
				 */
				inode->i_state |= I_DIRTY_PAGES;
select_queue:
				if (wbc->nr_to_write <= 0) {
					/*
					 * slice used up: queue for next turn
					 */
					requeue_io(inode);
				} else {
					/*
					 * somehow blocked: retry later
					 */
					redirty_tail(inode);
				}
			} else {
				/*
				 * Otherwise fully redirty the inode so that
				 * other inodes on this superblock will get some
				 * writeout.  Otherwise heavy writing to one
				 * file would indefinitely suspend writeout of
				 * all the other files.
				 */
				inode->i_state |= I_DIRTY_PAGES;
				redirty_tail(inode);
			}
		} else if (atomic_read(&inode->i_count)) {
			/*
			 * The inode is clean, inuse
			 */
			list_move(&inode->i_list, &inode_in_use);
		} else {
			/*
			 * The inode is clean, unused
			 */
			list_move(&inode->i_list, &inode_unused);
		}
	}
	inode_sync_complete(inode);
	return ret;
}

static void unpin_sb_for_writeback(struct super_block *sb)
{
	up_read(&sb->s_umount);
	put_super(sb);
}

enum sb_pin_state {
	SB_PINNED,
	SB_NOT_PINNED,
	SB_PIN_FAILED
};

/*
 * For WB_SYNC_NONE writeback, the caller does not have the sb pinned
 * before calling writeback. So make sure that we do pin it, so it doesn't
 * go away while we are writing inodes from it.
 */
static enum sb_pin_state pin_sb_for_writeback(struct writeback_control *wbc,
					      struct super_block *sb)
{
	/*
	 * Caller must already hold the ref for this
	 */
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->sb_pinned) {
		WARN_ON(!rwsem_is_locked(&sb->s_umount));
		return SB_NOT_PINNED;
	}
	spin_lock(&sb_lock);
	sb->s_count++;
	if (down_read_trylock(&sb->s_umount)) {
		if (sb->s_root) {
			spin_unlock(&sb_lock);
			return SB_PINNED;
		}
		/*
		 * umounted, drop rwsem again and fall through to failure
		 */
		up_read(&sb->s_umount);
	}
	sb->s_count--;
	spin_unlock(&sb_lock);
	return SB_PIN_FAILED;
}

/*
 * Write a portion of b_io inodes which belong to @sb.
 * If @wbc->sb != NULL, then find and write all such
 * inodes. Otherwise write only ones which go sequentially
 * in reverse order.
 * Return 1 if the caller's writeback routine should be
 * interrupted. Otherwise return 0.
 */
static int writeback_sb_inodes(struct super_block *sb,
			       struct bdi_writeback *wb,
			       struct writeback_control *wbc)
{
	while (!list_empty(&wb->b_io)) {
		long pages_skipped;
		struct inode *inode = list_entry(wb->b_io.prev,
						 struct inode, i_list);
		if (wbc->sb && sb != inode->i_sb) {
			/* super block given and doesn't
			   match, skip this inode */
			redirty_tail(inode);
			continue;
		}
		if (sb != inode->i_sb)
			/* finish with this superblock */
			return 0;
		if (inode->i_state & (I_NEW | I_WILL_FREE)) {
			requeue_io(inode);
			continue;
		}
		/*
		 * Was this inode dirtied after sync_sb_inodes was called?
		 * This keeps sync from extra jobs and livelock.
		 */
		if (inode_dirtied_after(inode, wbc->wb_start))
			return 1;

		BUG_ON(inode->i_state & (I_FREEING | I_CLEAR));
		__iget(inode);
		pages_skipped = wbc->pages_skipped;
		writeback_single_inode(inode, wbc);
		if (wbc->pages_skipped != pages_skipped) {
			/*
			 * writeback is not making progress due to locked
			 * buffers.  Skip this inode for now.
			 */
			redirty_tail(inode);
		}
		spin_unlock(&inode_lock);
		iput(inode);
		cond_resched();
		spin_lock(&inode_lock);
		if (wbc->nr_to_write <= 0) {
			wbc->more_io = 1;
			return 1;
		}
		if (!list_empty(&wb->b_more_io))
			wbc->more_io = 1;
	}
	/* b_io is empty */
	return 1;
}

static void writeback_inodes_wb(struct bdi_writeback *wb,
				struct writeback_control *wbc)
{
	int ret = 0;

	wbc->wb_start = jiffies; /* livelock avoidance */
	spin_lock(&inode_lock);
	if (!wbc->for_kupdate || list_empty(&wb->b_io))
		queue_io(wb, wbc->older_than_this);

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = list_entry(wb->b_io.prev,
						 struct inode, i_list);
		struct super_block *sb = inode->i_sb;
		enum sb_pin_state state;

		if (wbc->sb && sb != wbc->sb) {
			/* super block given and doesn't
			   match, skip this inode */
			redirty_tail(inode);
			continue;
		}
		state = pin_sb_for_writeback(wbc, sb);

		if (state == SB_PIN_FAILED) {
			requeue_io(inode);
			continue;
		}
		ret = writeback_sb_inodes(sb, wb, wbc);

		if (state == SB_PINNED)
			unpin_sb_for_writeback(sb);
		if (ret)
			break;
	}
	spin_unlock(&inode_lock);
	/* Leave any unwritten inodes on b_io */
}

void writeback_inodes_wbc(struct writeback_control *wbc)
{
	struct backing_dev_info *bdi = wbc->bdi;

	writeback_inodes_wb(&bdi->wb, wbc);
}

/*
 * The maximum number of pages to writeout in a single bdi flush/kupdate
 * operation.  We do this so we don't hold I_SYNC against an inode for
 * enormous amounts of time, which would block a userspace task which has
 * been forced to throttle against that inode.  Also, the code reevaluates
 * the dirty each time it has written this many pages.
 */
#define MAX_WRITEBACK_PAGES     1024

static inline bool over_bground_thresh(void)
{
	unsigned long background_thresh, dirty_thresh;

	get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);

	return (global_page_state(NR_FILE_DIRTY) +
		global_page_state(NR_UNSTABLE_NFS) >= background_thresh);
}

/*
 * Explicit flushing or periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space.  So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval.  But if a writeback event
 * takes longer than a dirty_writeback_interval interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static long wb_writeback(struct bdi_writeback *wb,
			 struct wb_writeback_args *args)
{
	struct writeback_control wbc = {
		.bdi			= wb->bdi,
		.sb			= args->sb,
		.sync_mode		= args->sync_mode,
		.older_than_this	= NULL,
		.for_kupdate		= args->for_kupdate,
		.for_background		= args->for_background,
		.range_cyclic		= args->range_cyclic,
		.sb_pinned		= args->sb_pinned,
	};
	unsigned long oldest_jif;
	long wrote = 0;
	struct inode *inode;

	if (wbc.for_kupdate) {
		wbc.older_than_this = &oldest_jif;
		oldest_jif = jiffies -
				msecs_to_jiffies(dirty_expire_interval * 10);
	}
	if (!wbc.range_cyclic) {
		wbc.range_start = 0;
		wbc.range_end = LLONG_MAX;
	}

	for (;;) {
		/*
		 * Stop writeback when nr_pages has been consumed
		 */
		if (args->nr_pages <= 0)
			break;

		/*
		 * For background writeout, stop when we are below the
		 * background dirty threshold
		 */
		if (args->for_background && !over_bground_thresh())
			break;

		wbc.more_io = 0;
		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
		wbc.pages_skipped = 0;
		writeback_inodes_wb(wb, &wbc);
		args->nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
		wrote += MAX_WRITEBACK_PAGES - wbc.nr_to_write;

		/*
		 * If we consumed everything, see if we have more
		 */
		if (wbc.nr_to_write <= 0)
			continue;
		/*
		 * Didn't write everything and we don't have more IO, bail
		 */
		if (!wbc.more_io)
			break;
		/*
		 * Did we write something? Try for more
		 */
		if (wbc.nr_to_write < MAX_WRITEBACK_PAGES)
			continue;
		/*
		 * Nothing written. Wait for some inode to
		 * become available for writeback. Otherwise
		 * we'll just busyloop.
		 */
		spin_lock(&inode_lock);
		if (!list_empty(&wb->b_more_io))  {
			inode = list_entry(wb->b_more_io.prev,
						struct inode, i_list);
			inode_wait_for_writeback(inode);
		}
		spin_unlock(&inode_lock);
	}

	return wrote;
}

/*
 * Return the next bdi_work struct that hasn't been processed by this
 * wb thread yet. ->seen is initially set for each thread that exists
 * for this device, when a thread first notices a piece of work it
 * clears its bit. Depending on writeback type, the thread will notify
 * completion on either receiving the work (WB_SYNC_NONE) or after
 * it is done (WB_SYNC_ALL).
 */
static struct bdi_work *get_next_work_item(struct backing_dev_info *bdi,
					   struct bdi_writeback *wb)
{
	struct bdi_work *work, *ret = NULL;

	rcu_read_lock();

	list_for_each_entry_rcu(work, &bdi->work_list, list) {
		if (!test_bit(wb->nr, &work->seen))
			continue;
		clear_bit(wb->nr, &work->seen);

		ret = work;
		break;
	}

	rcu_read_unlock();
	return ret;
}

static long wb_check_old_data_flush(struct bdi_writeback *wb)
{
	unsigned long expired;
	long nr_pages;

	/*
	 * When set to zero, disable periodic writeback
	 */
	if (!dirty_writeback_interval)
		return 0;

	expired = wb->last_old_flush +
			msecs_to_jiffies(dirty_writeback_interval * 10);
	if (time_before(jiffies, expired))
		return 0;

	wb->last_old_flush = jiffies;
	nr_pages = global_page_state(NR_FILE_DIRTY) +
			global_page_state(NR_UNSTABLE_NFS) +
			(inodes_stat.nr_inodes - inodes_stat.nr_unused);

	if (nr_pages) {
		struct wb_writeback_args args = {
			.nr_pages	= nr_pages,
			.sync_mode	= WB_SYNC_NONE,
			.for_kupdate	= 1,
			.range_cyclic	= 1,
		};

		return wb_writeback(wb, &args);
	}

	return 0;
}

/*
 * Retrieve work items and do the writeback they describe
 */
long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
{
	struct backing_dev_info *bdi = wb->bdi;
	struct bdi_work *work;
	long wrote = 0;

	while ((work = get_next_work_item(bdi, wb)) != NULL) {
		struct wb_writeback_args args = work->args;

		/*
		 * Override sync mode, in case we must wait for completion
		 */
		if (force_wait)
			work->args.sync_mode = args.sync_mode = WB_SYNC_ALL;

		/*
		 * If this isn't a data integrity operation, just notify
		 * that we have seen this work and we are now starting it.
		 */
		if (args.sync_mode == WB_SYNC_NONE)
			wb_clear_pending(wb, work);

		wrote += wb_writeback(wb, &args);

		/*
		 * This is a data integrity writeback, so only do the
		 * notification when we have completed the work.
		 */
		if (args.sync_mode == WB_SYNC_ALL)
			wb_clear_pending(wb, work);
	}

	/*
	 * Check for periodic writeback, kupdated() style
	 */
	wrote += wb_check_old_data_flush(wb);

	return wrote;
}

/*
 * Handle writeback of dirty data for the device backed by this bdi. Also
 * wakes up periodically and does kupdated style flushing.
 */
int bdi_writeback_task(struct bdi_writeback *wb)
{
	unsigned long last_active = jiffies;
	unsigned long wait_jiffies = -1UL;
	long pages_written;

	while (!kthread_should_stop()) {
		pages_written = wb_do_writeback(wb, 0);

		if (pages_written)
			last_active = jiffies;
		else if (wait_jiffies != -1UL) {
			unsigned long max_idle;

			/*
			 * Longest period of inactivity that we tolerate. If we
			 * see dirty data again later, the task will get
			 * recreated automatically.
			 */
			max_idle = max(5UL * 60 * HZ, wait_jiffies);
			if (time_after(jiffies, max_idle + last_active))
				break;
		}

		if (dirty_writeback_interval) {
			wait_jiffies = msecs_to_jiffies(dirty_writeback_interval * 10);
			schedule_timeout_interruptible(wait_jiffies);
		} else
			schedule();

		try_to_freeze();
	}

	return 0;
}

/*
 * Schedule writeback for all backing devices. This does WB_SYNC_NONE
 * writeback, for integrity writeback see bdi_sync_writeback().
 */
static void bdi_writeback_all(struct super_block *sb, long nr_pages)
{
	struct wb_writeback_args args = {
		.sb		= sb,
		.nr_pages	= nr_pages,
		.sync_mode	= WB_SYNC_NONE,
	};
	struct backing_dev_info *bdi;

	rcu_read_lock();

	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
		if (!bdi_has_dirty_io(bdi))
			continue;

		bdi_alloc_queue_work(bdi, &args);
	}

	rcu_read_unlock();
}

/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.
 */
void wakeup_flusher_threads(long nr_pages)
{
	if (nr_pages == 0)
		nr_pages = global_page_state(NR_FILE_DIRTY) +
				global_page_state(NR_UNSTABLE_NFS);
	bdi_writeback_all(NULL, nr_pages);
}

static noinline void block_dump___mark_inode_dirty(struct inode *inode)
{
	if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
		struct dentry *dentry;
		const char *name = "?";

		dentry = d_find_alias(inode);
		if (dentry) {
			spin_lock(&dentry->d_lock);
			name = (const char *) dentry->d_name.name;
		}
		printk(KERN_DEBUG
		       "%s(%d): dirtied inode %lu (%s) on %s\n",
		       current->comm, task_pid_nr(current), inode->i_ino,
		       name, inode->i_sb->s_id);
		if (dentry) {
			spin_unlock(&dentry->d_lock);
			dput(dentry);
		}
	}
}

/**
 * __mark_inode_dirty - internal function
 * @inode: inode to mark
 * @flags: what kind of dirty (i.e. I_DIRTY_SYNC)
 * Mark an inode as dirty. Callers should use mark_inode_dirty or
 * mark_inode_dirty_sync.
 *
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
 *
 * This function *must* be atomic for the I_DIRTY_PAGES case -
 * set_page_dirty() is called under spinlock in several places.
 *
 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
 * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
 * the kernel-internal blockdev inode represents the dirtying time of the
 * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
 * page->mapping->host, so the page-dirtying time is recorded in the internal
 * blockdev inode.
 */
void __mark_inode_dirty(struct inode *inode, int flags)
{
	struct super_block *sb = inode->i_sb;

	/*
	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
	 * dirty the inode itself
	 */
	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		if (sb->s_op->dirty_inode)
			sb->s_op->dirty_inode(inode);
	}

	/*
	 * make sure that changes are seen by all cpus before we test i_state
	 * -- mikulas
	 */
	smp_mb();

	/* avoid the locking if we can */
	if ((inode->i_state & flags) == flags)
		return;

	if (unlikely(block_dump))
		block_dump___mark_inode_dirty(inode);

	spin_lock(&inode_lock);
	if ((inode->i_state & flags) != flags) {
		const int was_dirty = inode->i_state & I_DIRTY;

		inode->i_state |= flags;

		/*
		 * If the inode is being synced, just update its dirty state.
		 * The unlocker will place the inode on the appropriate
		 * superblock list, based upon its state.
		 */
		if (inode->i_state & I_SYNC)
			goto out;

		/*
		 * Only add valid (hashed) inodes to the superblock's
		 * dirty list.  Add blockdev inodes as well.
		 */
		if (!S_ISBLK(inode->i_mode)) {
			if (hlist_unhashed(&inode->i_hash))
				goto out;
		}
		if (inode->i_state & (I_FREEING|I_CLEAR))
			goto out;

		/*
		 * If the inode was already on b_dirty/b_io/b_more_io, don't
		 * reposition it (that would break b_dirty time-ordering).
		 */
		if (!was_dirty) {
			struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
			struct backing_dev_info *bdi = wb->bdi;

			if (bdi_cap_writeback_dirty(bdi) &&
			    !test_bit(BDI_registered, &bdi->state)) {
				WARN_ON(1);
				printk(KERN_ERR "bdi-%s not registered\n",
								bdi->name);
			}

			inode->dirtied_when = jiffies;
			list_move(&inode->i_list, &wb->b_dirty);
		}
	}
out:
	spin_unlock(&inode_lock);
}
EXPORT_SYMBOL(__mark_inode_dirty);

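/*
 * Typical usage sketch: a filesystem that has changed in-core inode
 * metadata (i_size, timestamps, ...) normally goes through the
 * mark_inode_dirty()/mark_inode_dirty_sync() wrappers rather than calling
 * this directly, e.g. something like
 *
 *	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
 *	mark_inode_dirty(inode);
 *
 * which ends up calling __mark_inode_dirty(inode, I_DIRTY).
 */
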
/*
 * Write out a superblock's list of dirty inodes.  A wait will be performed
 * upon no inodes, all inodes or the final one, depending upon sync_mode.
 *
 * If older_than_this is non-NULL, then only write out inodes which
 * had their first dirtying at a time earlier than *older_than_this.
 *
 * If `bdi' is non-zero then we're being asked to writeback a specific queue.
 * This function assumes that the blockdev superblock's inodes are backed by
 * a variety of queues, so all inodes are searched.  For other superblocks,
 * assume that all inodes are backed by the same queue.
 *
 * The inodes to be written are parked on bdi->b_io.  They are moved back onto
 * bdi->b_dirty as they are selected for writing.  This way, none can be missed
 * on the writer throttling path, and we get decent balancing between many
 * throttled threads: we don't want them all piling up on inode_sync_wait.
 */
static void wait_sb_inodes(struct super_block *sb)
{
	struct inode *inode, *old_inode = NULL;

	/*
	 * We need to be protected against the filesystem going from
	 * r/o to r/w or vice versa.
	 */
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	spin_lock(&inode_lock);

	/*
	 * Data integrity sync. Must wait for all pages under writeback,
	 * because there may have been pages dirtied before our sync
	 * call, but which had writeout started before we write it out.
	 * In which case, the inode may not be on the dirty list, but
	 * we still have to wait for that writeout.
	 */
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		struct address_space *mapping;

		if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW))
			continue;
		mapping = inode->i_mapping;
		if (mapping->nrpages == 0)
			continue;
		__iget(inode);
		spin_unlock(&inode_lock);
		/*
		 * We hold a reference to 'inode' so it couldn't have
		 * been removed from s_inodes list while we dropped the
		 * inode_lock.  We cannot iput the inode now as we can
		 * be holding the last reference and we cannot iput it
		 * under inode_lock. So we keep the reference and iput
		 * it later.
		 */
		iput(old_inode);
		old_inode = inode;

		filemap_fdatawait(mapping);

		cond_resched();

		spin_lock(&inode_lock);
	}
	spin_unlock(&inode_lock);
	iput(old_inode);
}

static void __writeback_inodes_sb(struct super_block *sb, int sb_locked)
{
	unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
	unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
	long nr_to_write;

	nr_to_write = nr_dirty + nr_unstable +
			(inodes_stat.nr_inodes - inodes_stat.nr_unused);

	bdi_start_writeback(sb->s_bdi, sb, nr_to_write, sb_locked);
}

/**
 * writeback_inodes_sb	-	writeback dirty inodes from given super_block
 * @sb: the superblock
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO. The number of pages submitted is
 * returned.
 */
void writeback_inodes_sb(struct super_block *sb)
{
	__writeback_inodes_sb(sb, 0);
}
EXPORT_SYMBOL(writeback_inodes_sb);

/**
 * writeback_inodes_sb_locked	- writeback dirty inodes from given super_block
 * @sb: the superblock
 *
 * Like writeback_inodes_sb(), except the caller already holds the
 * sb umount sem.
 */
void writeback_inodes_sb_locked(struct super_block *sb)
{
	__writeback_inodes_sb(sb, 1);
}

/**
 * writeback_inodes_sb_if_idle	-	start writeback if none underway
 * @sb: the superblock
 *
 * Invoke writeback_inodes_sb if no writeback is currently underway.
 * Returns 1 if writeback was started, 0 if not.
 */
int writeback_inodes_sb_if_idle(struct super_block *sb)
{
	if (!writeback_in_progress(sb->s_bdi)) {
		writeback_inodes_sb(sb);
		return 1;
	} else
		return 0;
}
EXPORT_SYMBOL(writeback_inodes_sb_if_idle);

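/*
 * Usage sketch: a filesystem that wants to push dirty data out before
 * retrying a failing space allocation might do something like
 *
 *	if (writeback_inodes_sb_if_idle(sb))
 *		goto retry_alloc;
 *
 * where retry_alloc is a hypothetical label in the caller. Code that
 * already holds sb->s_umount for reading (e.g. umount-time sync) should
 * use writeback_inodes_sb_locked() instead; it passes sb_locked down so
 * pin_sb_for_writeback() sees sb_pinned set and does not try (and fail)
 * to take the umount sem again, which would make the WB_SYNC_NONE pass
 * skip the superblock entirely.
 */
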
/**
 * sync_inodes_sb	-	sync sb inode pages
 * @sb: the superblock
 *
 * This function writes and waits on any dirty inode belonging to this
 * super_block. The number of pages synced is returned.
 */
void sync_inodes_sb(struct super_block *sb)
{
	bdi_sync_writeback(sb->s_bdi, sb);
	wait_sb_inodes(sb);
}
EXPORT_SYMBOL(sync_inodes_sb);

/**
 * write_inode_now	-	write an inode to disk
 * @inode: inode to write to disk
 * @sync: whether the write should be synchronous or not
 *
 * This function commits an inode to disk immediately if it is dirty. This is
 * primarily needed by knfsd.
 *
 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
 */
int write_inode_now(struct inode *inode, int sync)
{
	int ret;
	struct writeback_control wbc = {
		.nr_to_write = LONG_MAX,
		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	if (!mapping_cap_writeback_dirty(inode->i_mapping))
		wbc.nr_to_write = 0;

	might_sleep();
	spin_lock(&inode_lock);
	ret = writeback_single_inode(inode, &wbc);
	spin_unlock(&inode_lock);
	if (sync)
		inode_sync_wait(inode);
	return ret;
}
EXPORT_SYMBOL(write_inode_now);

/**
 * sync_inode - write an inode and its pages to disk.
 * @inode: the inode to sync
 * @wbc: controls the writeback mode
 *
 * sync_inode() will write an inode and its pages to disk.  It will also
 * correctly update the inode on its superblock's dirty inode lists and will
 * update inode->i_state.
 *
 * The caller must have a ref on the inode.
 */
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
	int ret;

	spin_lock(&inode_lock);
	ret = writeback_single_inode(inode, wbc);
	spin_unlock(&inode_lock);
	return ret;
}
EXPORT_SYMBOL(sync_inode);
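
/*
 * Usage sketch: sync_inode() leaves the writeback_control entirely up to
 * the caller, so a data-integrity write of a single inode could look like
 *
 *	struct writeback_control wbc = {
 *		.sync_mode	= WB_SYNC_ALL,
 *		.nr_to_write	= LONG_MAX,
 *		.range_start	= 0,
 *		.range_end	= LLONG_MAX,
 *	};
 *	int err = sync_inode(inode, &wbc);
 *
 * which is roughly what write_inode_now(inode, 1) does above, minus the
 * trailing inode_sync_wait().
 */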