/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes.  ie: data writeback.  Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002	akpm@zip.com.au
 *		Split out of fs/inode.c
 *		Additions for address_space-based writeback
 */
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
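
/* The dummy blockdev superblock: the superblock to which all block-device
 * pagecache inodes belong (the "dummy blockdev superblock" referred to in
 * the comments below). */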
extern struct super_block *blockdev_superblock;
/**
 * __mark_inode_dirty - internal function
 * @inode: inode to mark
 * @flags: what kind of dirty (i.e. I_DIRTY_SYNC)
 *
 * Mark an inode as dirty.  Callers should use mark_inode_dirty() or
 * mark_inode_dirty_sync().
 *
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
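 *
 * For example (illustrative only, not part of this file), a filesystem
 * updating an inode's timestamps would typically do:
 *
 *	inode->i_mtime = CURRENT_TIME;
 *	mark_inode_dirty(inode);
 *
 * which ends up here with all of I_DIRTY set.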
 *
 * This function *must* be atomic for the I_DIRTY_PAGES case -
 * set_page_dirty() is called under spinlock in several places.
 *
 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
 * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
 * the kernel-internal blockdev inode represents the dirtying time of the
 * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
 * page->mapping->host, so the page-dirtying time is recorded in the internal
 * blockdev inode.
 */
void __mark_inode_dirty(struct inode *inode, int flags)
{
	struct super_block *sb = inode->i_sb;
	/*
	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
	 * dirty the inode itself
	 */
	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		if (sb->s_op->dirty_inode)
			sb->s_op->dirty_inode(inode);
	}

	/*
	 * make sure that changes are seen by all cpus before we test i_state
	 * -- mikulas
	 */
	smp_mb();

	/* avoid the locking if we can */
	if ((inode->i_state & flags) == flags)
		return;

	spin_lock(&inode_lock);
	if ((inode->i_state & flags) != flags) {
		const int was_dirty = inode->i_state & I_DIRTY;

		inode->i_state |= flags;

		/*
		 * If the inode is locked, just update its dirty state.
		 * The unlocker will place the inode on the appropriate
		 * superblock list, based upon its state.
		 */
		if (inode->i_state & I_LOCK)
			goto out;

		/*
		 * Only add valid (hashed) inodes to the superblock's
		 * dirty list.  Add blockdev inodes as well.
		 */
		if (!S_ISBLK(inode->i_mode)) {
			if (hlist_unhashed(&inode->i_hash))
				goto out;
			if (inode->i_state & (I_FREEING|I_CLEAR))
				goto out;
		}

		/*
		 * If the inode was already on s_dirty or s_io, don't
		 * reposition it (that would break s_dirty time-ordering).
		 */
		if (!was_dirty) {
			inode->dirtied_when = jiffies;
			list_move(&inode->i_list, &sb->s_dirty);
		}
	}
out:
	spin_unlock(&inode_lock);
}

EXPORT_SYMBOL(__mark_inode_dirty);
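
/*
 * Write the inode back through the filesystem's ->write_inode method, if it
 * has one.  Bad inodes are skipped: there is nothing trustworthy to write.
 */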
static void write_inode(struct inode *inode, int sync)
{
	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
		inode->i_sb->s_op->write_inode(inode, sync);
}
/*
 * Write a single inode's dirty pages and inode data out to disk.
 * If `wait' is set, wait on the writeout.
 *
 * The whole writeout design is quite complex and fragile.  We want to avoid
 * starvation of particular inodes when others are being redirtied, prevent
 * livelocks, etc.
 *
 * So what we do is to move all pages which are to be written from dirty_pages
 * onto io_pages.  And keep on writing io_pages until it's empty.  Refusing to
 * move more pages onto io_pages until io_pages is empty.  Once that point has
 * been reached, we are ready to take another pass across the inode's dirty
 * pages.
 *
 * Called under inode_lock.
 */
static void
__sync_single_inode(struct inode *inode, struct writeback_control *wbc)
{
	unsigned dirty;
	struct address_space *mapping = inode->i_mapping;
	struct super_block *sb = inode->i_sb;
	int wait = wbc->sync_mode == WB_SYNC_ALL;

	BUG_ON(inode->i_state & I_LOCK);

	/* Set I_LOCK, reset I_DIRTY */
	dirty = inode->i_state & I_DIRTY;
	inode->i_state |= I_LOCK;
	inode->i_state &= ~I_DIRTY;

	/*
	 * smp_rmb(); note: if you remove write_lock below, you must add this.
	 * mark_inode_dirty doesn't take spinlock, make sure that inode is not
	 * read speculatively by this cpu before &= ~I_DIRTY  -- mikulas
	 */

	spin_lock(&mapping->page_lock);
	if (wait || !wbc->for_kupdate || list_empty(&mapping->io_pages))
		list_splice_init(&mapping->dirty_pages, &mapping->io_pages);
	spin_unlock(&mapping->page_lock);
	spin_unlock(&inode_lock);

	do_writepages(mapping, wbc);

	/* Don't write the inode if only I_DIRTY_PAGES was set */
	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC))
		write_inode(inode, wait);

	if (wait)
		filemap_fdatawait(mapping);
	spin_lock(&inode_lock);
	inode->i_state &= ~I_LOCK;
	if (!(inode->i_state & I_FREEING)) {
		if (!list_empty(&mapping->io_pages)) {
			/* Needs more writeback */
			inode->i_state |= I_DIRTY_PAGES;
		} else if (!list_empty(&mapping->dirty_pages)) {
			/* Redirtied */
			inode->i_state |= I_DIRTY_PAGES;
			inode->dirtied_when = jiffies;
			list_move(&inode->i_list, &sb->s_dirty);
		} else if (inode->i_state & I_DIRTY) {
			/* Redirtied */
			inode->dirtied_when = jiffies;
			list_move(&inode->i_list, &sb->s_dirty);
		} else if (atomic_read(&inode->i_count)) {
			list_move(&inode->i_list, &inode_in_use);
		} else {
			list_move(&inode->i_list, &inode_unused);
		}
	}
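	/* I_LOCK is clear again: wake anyone sleeping in __wait_on_inode() */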
	wake_up_inode(inode);
}
/*
 * Write out an inode's dirty pages.  Called under inode_lock.
 */
static void
__writeback_single_inode(struct inode *inode,
			struct writeback_control *wbc)
{
	if ((wbc->sync_mode != WB_SYNC_ALL) && (inode->i_state & I_LOCK)) {
		list_move(&inode->i_list, &inode->i_sb->s_dirty);
		return;
	}

	/*
	 * It's a data-integrity sync.  We must wait.
	 */
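	/*
	 * Pin the inode with __iget() so it cannot be freed while we sleep
	 * without inode_lock held, and drop the reference after each wait.
	 */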
	while (inode->i_state & I_LOCK) {
		__iget(inode);
		spin_unlock(&inode_lock);
		__wait_on_inode(inode);
		iput(inode);
		spin_lock(&inode_lock);
	}
	__sync_single_inode(inode, wbc);
}
/*
 * Write out a superblock's list of dirty inodes.  A wait will be performed
 * upon no inodes, all inodes or the final one, depending upon sync_mode.
 *
 * If older_than_this is non-NULL, then only write out inodes which
 * had their first dirtying at a time earlier than *older_than_this.
 *
 * If we're a pdflush thread, then implement pdflush collision avoidance
 * against the entire list.
 *
 * WB_SYNC_HOLD is a hack for sys_sync(): reattach the inode to sb->s_dirty so
 * that it can be located for waiting on in __writeback_single_inode().
 *
 * Called under inode_lock.
 *
 * If `bdi' is non-zero then we're being asked to writeback a specific queue.
 * This function assumes that the blockdev superblock's inodes are backed by
 * a variety of queues, so all inodes are searched.  For other superblocks,
 * assume that all inodes are backed by the same queue.
 *
 * FIXME: this linear search could get expensive with many filesystems.  But
 * how to fix?  We need to go from an address_space to all inodes which share
 * a queue with that address_space.  (Easy: have a global "dirty superblocks"
 * list).
 *
 * The inodes to be written are parked on sb->s_io.  They are moved back onto
 * sb->s_dirty as they are selected for writing.  This way, none can be missed
 * on the writer throttling path, and we get decent balancing between many
 * throttled threads: we don't want them all piling up on __wait_on_inode.
 */
static void
sync_sb_inodes(struct super_block *sb, struct writeback_control *wbc)
{
	const unsigned long start = jiffies;	/* livelock avoidance */

	if (!wbc->for_kupdate || list_empty(&sb->s_io))
		list_splice_init(&sb->s_dirty, &sb->s_io);

	while (!list_empty(&sb->s_io)) {
		struct inode *inode = list_entry(sb->s_io.prev,
						struct inode, i_list);
		struct address_space *mapping = inode->i_mapping;
		struct backing_dev_info *bdi = mapping->backing_dev_info;

		if (bdi->memory_backed) {
			if (sb == blockdev_superblock) {
				/*
				 * Dirty memory-backed blockdev: the ramdisk
				 * driver does this.
				 */
				list_move(&inode->i_list, &sb->s_dirty);
				continue;
			}

			/*
			 * Assume that all inodes on this superblock are memory
			 * backed.  Skip the superblock.
			 */
			break;
		}

		if (wbc->nonblocking && bdi_write_congested(bdi)) {
			wbc->encountered_congestion = 1;
			if (sb != blockdev_superblock)
				break;		/* Skip a congested fs */
			list_move(&inode->i_list, &sb->s_dirty);
			continue;		/* Skip a congested blockdev */
		}

		if (wbc->bdi && bdi != wbc->bdi) {
			if (sb != blockdev_superblock)
				break;		/* fs has the wrong queue */
			list_move(&inode->i_list, &sb->s_dirty);
			continue;		/* blockdev has wrong queue */
		}

		/* Was this inode dirtied after sync_sb_inodes was called? */
		if (time_after(inode->dirtied_when, start))
			break;

		/* Was this inode dirtied too recently? */
		if (wbc->older_than_this && time_after(inode->dirtied_when,
						*wbc->older_than_this))
			break;

		/* Is another pdflush already flushing this queue? */
		if (current_is_pdflush() && !writeback_acquire(bdi))
			break;

		BUG_ON(inode->i_state & I_FREEING);
		__iget(inode);
		__writeback_single_inode(inode, wbc);
		if (wbc->sync_mode == WB_SYNC_HOLD) {
			inode->dirtied_when = jiffies;
			list_move(&inode->i_list, &sb->s_dirty);
		}
		if (current_is_pdflush())
			writeback_release(bdi);
		spin_unlock(&inode_lock);
		iput(inode);
		spin_lock(&inode_lock);
		if (wbc->nr_to_write <= 0)
			break;
	}
	return;		/* Leave any unwritten inodes on s_io */
}
/*
 * Start writeback of dirty pagecache data against all unlocked inodes.
 *
 * Note:
 * We don't need to grab a reference to the superblock here. If it has a
 * non-empty ->s_dirty it hasn't been killed yet, and kill_super() won't
 * proceed past sync_inodes_sb() until both the ->s_dirty and ->s_io lists
 * are empty.  Since __sync_single_inode() regains inode_lock before it
 * finally moves the inode off the superblock lists, we are OK.
 *
 * If `older_than_this' is non-zero then only flush inodes which have a
 * flushtime older than *older_than_this.
 *
 * If `bdi' is non-zero then we will scan the first inode against each
 * superblock until we find the matching ones.  One group will be the dirty
 * inodes against a filesystem.  Then when we hit the dummy blockdev superblock,
 * sync_sb_inodes will seek out the blockdev which matches `bdi'.  Maybe not
 * super-efficient but we're about to do a ton of I/O...
 */
void
writeback_inodes(struct writeback_control *wbc)
{
	struct super_block *sb;

	spin_lock(&inode_lock);
	spin_lock(&sb_lock);
	sb = sb_entry(super_blocks.prev);
	for (; sb != sb_entry(&super_blocks); sb = sb_entry(sb->s_list.prev)) {
		if (!list_empty(&sb->s_dirty) || !list_empty(&sb->s_io)) {
			spin_unlock(&sb_lock);
			sync_sb_inodes(sb, wbc);
			spin_lock(&sb_lock);
		}
		if (wbc->nr_to_write <= 0)
			break;
	}
	spin_unlock(&sb_lock);
	spin_unlock(&inode_lock);
}
/*
 * writeback and wait upon the filesystem's dirty inodes.  The caller will
 * do this in two passes - one to write, and one to wait.  WB_SYNC_HOLD is
 * used to park the written inodes on sb->s_dirty for the wait pass.
 *
 * A finite limit is set on the number of pages which will be written, to
 * prevent infinite livelock of sys_sync().
 *
 * We add in the number of potentially dirty inodes, because each inode write
 * can dirty pagecache in the underlying blockdev.
 */
void sync_inodes_sb(struct super_block *sb, int wait)
{
	struct page_state ps;
	struct writeback_control wbc = {
		.bdi		= NULL,
		.sync_mode	= wait ? WB_SYNC_ALL : WB_SYNC_HOLD,
		.older_than_this = NULL,
		.nr_to_write	= 0,
	};

	get_page_state(&ps);
	wbc.nr_to_write = ps.nr_dirty + ps.nr_unstable +
			(inodes_stat.nr_inodes - inodes_stat.nr_unused) +
			ps.nr_dirty + ps.nr_unstable;
	wbc.nr_to_write += wbc.nr_to_write / 2;		/* Bit more for luck */
	spin_lock(&inode_lock);
	sync_sb_inodes(sb, &wbc);
	spin_unlock(&inode_lock);
}
/*
 * Rather lame livelock avoidance.
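 *
 * The s_syncing flag marks superblocks which have already been picked up
 * by the current pass, so get_super_to_sync() visits each one at most once.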
 */
static void set_sb_syncing(int val)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
	sb = sb_entry(super_blocks.prev);
	for (; sb != sb_entry(&super_blocks); sb = sb_entry(sb->s_list.prev)) {
		sb->s_syncing = val;
	}
	spin_unlock(&sb_lock);
}
/*
 * Find a superblock with inodes that need to be synced
 */
static struct super_block *get_super_to_sync(void)
{
	struct super_block *sb;
restart:
	spin_lock(&sb_lock);
	sb = sb_entry(super_blocks.prev);
	for (; sb != sb_entry(&super_blocks); sb = sb_entry(sb->s_list.prev)) {
		if (sb->s_syncing)
			continue;
		sb->s_syncing = 1;
		sb->s_count++;
		spin_unlock(&sb_lock);
		down_read(&sb->s_umount);
		if (!sb->s_root) {
			drop_super(sb);
			goto restart;
		}
		return sb;
	}
	spin_unlock(&sb_lock);
	return NULL;
}
/*
 * sync_inodes
 *
 * sync_inodes() goes through each super block's dirty inode list, writes the
 * inodes out, waits on the writeout and puts the inodes back on the normal
 * list.
 *
 * This is for sys_sync().  fsync_dev() uses the same algorithm.  The subtle
 * part of the sync functions is that the blockdev "superblock" is processed
 * last.  This is because the write_inode() function of a typical fs will
 * perform no I/O, but will mark buffers in the blockdev mapping as dirty.
 * What we want to do is to perform all that dirtying first, and then write
 * back all those inode blocks via the blockdev mapping in one sweep.  So the
 * additional (somewhat redundant) sync_blockdev() calls here are to make
 * sure that really happens.  Because if we call sync_inodes_sb(wait=1) with
 * outstanding dirty inodes, the writeback goes block-at-a-time within the
 * filesystem's write_inode().  This is extremely slow.
 */
void sync_inodes(int wait)
{
	struct super_block *sb;

	set_sb_syncing(0);
	while ((sb = get_super_to_sync()) != NULL) {
		sync_inodes_sb(sb, 0);
		sync_blockdev(sb->s_bdev);
		drop_super(sb);
	}
	if (wait) {
		set_sb_syncing(0);
		while ((sb = get_super_to_sync()) != NULL) {
			sync_inodes_sb(sb, 1);
			sync_blockdev(sb->s_bdev);
			drop_super(sb);
		}
	}
}
/**
 * write_inode_now - write an inode to disk
 * @inode: inode to write to disk
 * @sync: whether the write should be synchronous or not
 *
 * This function commits an inode to disk immediately if it is
 * dirty. This is primarily needed by knfsd.
 */
void write_inode_now(struct inode *inode, int sync)
{
	struct writeback_control wbc = {
		.nr_to_write = LONG_MAX,
		.sync_mode = WB_SYNC_ALL,
	};

	spin_lock(&inode_lock);
	__writeback_single_inode(inode, &wbc);
	spin_unlock(&inode_lock);
	if (sync)
		wait_on_inode(inode);
}

EXPORT_SYMBOL(write_inode_now);
/**
 * generic_osync_inode - flush all dirty data for a given inode to disk
 * @inode: inode to write
 * @mapping: the address_space which should be flushed
 * @what: what to write and wait upon
 *
 * This can be called by file_write functions for files which have the
 * O_SYNC flag set, to flush dirty writes to disk.
 *
 * @what is a bitmask, specifying which part of the inode's data should be
 * written and waited upon:
 *
 *    OSYNC_DATA:     i_mapping's dirty data
 *    OSYNC_METADATA: the buffers at i_mapping->private_list
 *    OSYNC_INODE:    the inode itself
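 *
 * Illustrative only (not from this file): an O_SYNC file write would
 * typically finish with something like
 *
 *	err = generic_osync_inode(inode, mapping, OSYNC_METADATA|OSYNC_DATA);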
 */
int generic_osync_inode(struct inode *inode, struct address_space *mapping, int what)
{
	int err = 0;
	int need_write_inode_now = 0;
	int err2;
	current->flags |= PF_SYNCWRITE;
	if (what & OSYNC_DATA)
		err = filemap_fdatawrite(mapping);
	if (what & (OSYNC_METADATA|OSYNC_DATA)) {
		err2 = sync_mapping_buffers(mapping);
		if (!err)
			err = err2;
	}
	if (what & OSYNC_DATA) {
		err2 = filemap_fdatawait(mapping);
		if (!err)
			err = err2;
	}
	current->flags &= ~PF_SYNCWRITE;

	spin_lock(&inode_lock);
	if ((inode->i_state & I_DIRTY) &&
	    ((what & OSYNC_INODE) || (inode->i_state & I_DIRTY_DATASYNC)))
		need_write_inode_now = 1;
	spin_unlock(&inode_lock);

	if (need_write_inode_now)
		write_inode_now(inode, 1);
	else
		wait_on_inode(inode);

	return err;
}

EXPORT_SYMBOL(generic_osync_inode);
/**
 * writeback_acquire - attempt to get exclusive writeback access to a device
 * @bdi: the device's backing_dev_info structure
 *
 * It is a waste of resources to have more than one pdflush thread blocked on
 * a single request queue.  Exclusion at the request_queue level is obtained
 * via a flag in the request_queue's backing_dev_info.state.
 *
 * Non-request_queue-backed address_spaces will share default_backing_dev_info,
 * unless they implement their own.  Which is somewhat inefficient, as this
 * may prevent concurrent writeback against multiple devices.
 */
int writeback_acquire(struct backing_dev_info *bdi)
{
	return !test_and_set_bit(BDI_pdflush, &bdi->state);
}
/**
 * writeback_in_progress - determine whether there is writeback in progress
 * against a backing device.
 * @bdi: the device's backing_dev_info structure.
 */
int writeback_in_progress(struct backing_dev_info *bdi)
{
	return test_bit(BDI_pdflush, &bdi->state);
}
/**
 * writeback_release - relinquish exclusive writeback access against a device.
 * @bdi: the device's backing_dev_info structure
 */
void writeback_release(struct backing_dev_info *bdi)
{
	BUG_ON(!writeback_in_progress(bdi));
	clear_bit(BDI_pdflush, &bdi->state);
}
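
/*
 * Typical acquire/release pairing, as used by sync_sb_inodes() above:
 *
 *	if (current_is_pdflush() && !writeback_acquire(bdi))
 *		return;		-- someone else is flushing this queue
 *	... do the writeback ...
 *	if (current_is_pdflush())
 *		writeback_release(bdi);
 */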