Merge with Linux 2.5.74.
[linux-2.6/linux-mips.git] / fs / buffer.c
blob 56c9f4e03bdd32fe8fb13417e9a6f1e6988f7c31
1 /*
2 * linux/fs/buffer.c
4 * Copyright (C) 1991, 1992, 2002 Linus Torvalds
5 */
7 /*
8 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
10 * Removed a lot of unnecessary code and simplified things now that
11 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
13 * Speed up hash, lru, and free list operations. Use gfp() for allocating
14 * hash table, use SLAB cache for buffer heads. SMP threading. -DaveM
16 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
18 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
21 #include <linux/config.h>
22 #include <linux/kernel.h>
23 #include <linux/fs.h>
24 #include <linux/mm.h>
25 #include <linux/percpu.h>
26 #include <linux/slab.h>
27 #include <linux/smp_lock.h>
28 #include <linux/blkdev.h>
29 #include <linux/file.h>
30 #include <linux/quotaops.h>
31 #include <linux/highmem.h>
32 #include <linux/module.h>
33 #include <linux/writeback.h>
34 #include <linux/hash.h>
35 #include <linux/suspend.h>
36 #include <linux/buffer_head.h>
37 #include <linux/bio.h>
38 #include <linux/notifier.h>
39 #include <linux/cpu.h>
40 #include <asm/bitops.h>
42 static void invalidate_bh_lrus(void);
44 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
47 * Hashed waitqueue_heads for wait_on_buffer()
49 #define BH_WAIT_TABLE_ORDER 7
50 static struct bh_wait_queue_head {
51 wait_queue_head_t wqh;
52 } ____cacheline_aligned_in_smp bh_wait_queue_heads[1<<BH_WAIT_TABLE_ORDER];
55 * Debug/devel support stuff
58 void __buffer_error(char *file, int line)
60 static int enough;
62 if (enough > 10)
63 return;
64 enough++;
65 printk("buffer layer error at %s:%d\n", file, line);
66 #ifndef CONFIG_KALLSYMS
67 printk("Pass this trace through ksymoops for reporting\n");
68 #endif
69 dump_stack();
71 EXPORT_SYMBOL(__buffer_error);
73 inline void
74 init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
76 bh->b_end_io = handler;
77 bh->b_private = private;
81 * Return the address of the waitqueue_head to be used for this
82 * buffer_head
84 wait_queue_head_t *bh_waitq_head(struct buffer_head *bh)
86 return &bh_wait_queue_heads[hash_ptr(bh, BH_WAIT_TABLE_ORDER)].wqh;
88 EXPORT_SYMBOL(bh_waitq_head);
90 void wake_up_buffer(struct buffer_head *bh)
92 wait_queue_head_t *wq = bh_waitq_head(bh);
94 if (waitqueue_active(wq))
95 wake_up_all(wq);
97 EXPORT_SYMBOL(wake_up_buffer);
99 void unlock_buffer(struct buffer_head *bh)
102 * unlock_buffer against a zero-count bh is a bug, if the page
103 * is not locked. Because then nothing protects the buffer's
104 * waitqueue, which is used here. (Well. Other locked buffers
105 * against the page will pin it. But complain anyway).
107 if (atomic_read(&bh->b_count) == 0 &&
108 !PageLocked(bh->b_page) &&
109 !PageWriteback(bh->b_page))
110 buffer_error();
112 clear_buffer_locked(bh);
113 smp_mb__after_clear_bit();
114 wake_up_buffer(bh);
118 * Block until a buffer comes unlocked. This doesn't stop it
119 * from becoming locked again - you have to lock it yourself
120 * if you want to preserve its state.
122 void __wait_on_buffer(struct buffer_head * bh)
124 wait_queue_head_t *wqh = bh_waitq_head(bh);
125 DEFINE_WAIT(wait);
127 if (atomic_read(&bh->b_count) == 0 &&
128 (!bh->b_page || !PageLocked(bh->b_page)))
129 buffer_error();
131 do {
132 prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
133 if (buffer_locked(bh)) {
134 blk_run_queues();
135 io_schedule();
137 } while (buffer_locked(bh));
138 finish_wait(wqh, &wait);
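/*
 * Example (illustrative sketch, not part of buffer.c): a caller that wants
 * to preserve the buffer's state takes the lock itself.  This is roughly
 * what the lock_buffer() helper in <linux/buffer_head.h> does, built on the
 * same test_set_buffer_locked()/__wait_on_buffer() primitives used above.
 * The name example_lock_buffer is made up for illustration.
 */
static inline void example_lock_buffer(struct buffer_head *bh)
{
	/* Sleep on the hashed waitqueue until we win the lock bit */
	while (test_set_buffer_locked(bh))
		__wait_on_buffer(bh);
}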
141 static void
142 __set_page_buffers(struct page *page, struct buffer_head *head)
144 if (page_has_buffers(page))
145 buffer_error();
146 page_cache_get(page);
147 SetPagePrivate(page);
148 page->private = (unsigned long)head;
151 static void
152 __clear_page_buffers(struct page *page)
154 ClearPagePrivate(page);
155 page->private = 0;
156 page_cache_release(page);
159 static void buffer_io_error(struct buffer_head *bh)
161 char b[BDEVNAME_SIZE];
163 printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
164 bdevname(bh->b_bdev, b),
165 (unsigned long long)bh->b_blocknr);
169 * Default synchronous end-of-IO handler.. Just mark it up-to-date and
170 * unlock the buffer. This is what ll_rw_block uses too.
172 void end_buffer_io_sync(struct buffer_head *bh, int uptodate)
174 if (uptodate) {
175 set_buffer_uptodate(bh);
176 } else {
178 * This happens, due to failed READA attempts.
179 * buffer_io_error(bh);
181 clear_buffer_uptodate(bh);
183 unlock_buffer(bh);
184 put_bh(bh);
188 * Write out and wait upon all the dirty data associated with a block
189 * device via its mapping. Does not take the superblock lock.
191 int sync_blockdev(struct block_device *bdev)
193 int ret = 0;
195 if (bdev) {
196 int err;
198 ret = filemap_fdatawrite(bdev->bd_inode->i_mapping);
199 err = filemap_fdatawait(bdev->bd_inode->i_mapping);
200 if (!ret)
201 ret = err;
203 return ret;
205 EXPORT_SYMBOL(sync_blockdev);
208 * Write out and wait upon all dirty data associated with this
209 * superblock. Filesystem data as well as the underlying block
210 * device. Takes the superblock lock.
212 int fsync_super(struct super_block *sb)
214 sync_inodes_sb(sb, 0);
215 DQUOT_SYNC(sb);
216 lock_super(sb);
217 if (sb->s_dirt && sb->s_op->write_super)
218 sb->s_op->write_super(sb);
219 unlock_super(sb);
220 if (sb->s_op->sync_fs)
221 sb->s_op->sync_fs(sb, 1);
222 sync_blockdev(sb->s_bdev);
223 sync_inodes_sb(sb, 1);
225 return sync_blockdev(sb->s_bdev);
229 * Write out and wait upon all dirty data associated with this
230 * device. Filesystem data as well as the underlying block
231 * device. Takes the superblock lock.
233 int fsync_bdev(struct block_device *bdev)
235 struct super_block *sb = get_super(bdev);
236 if (sb) {
237 int res = fsync_super(sb);
238 drop_super(sb);
239 return res;
241 return sync_blockdev(bdev);
245 * sync everything. Start out by waking pdflush, because that writes back
246 * all queues in parallel.
248 static void do_sync(unsigned long wait)
250 wakeup_bdflush(0);
251 sync_inodes(0); /* All mappings, inodes and their blockdevs */
252 DQUOT_SYNC(NULL);
253 sync_supers(); /* Write the superblocks */
254 sync_filesystems(0); /* Start syncing the filesystems */
255 sync_filesystems(wait); /* Waitingly sync the filesystems */
256 sync_inodes(wait); /* Mappings, inodes and blockdevs, again. */
257 if (!wait)
258 printk("Emergency Sync complete\n");
261 asmlinkage long sys_sync(void)
263 do_sync(1);
264 return 0;
267 void emergency_sync(void)
269 pdflush_operation(do_sync, 0);
273 * Generic function to fsync a file.
275 * filp may be NULL if called via the msync of a vma.
278 int file_fsync(struct file *filp, struct dentry *dentry, int datasync)
280 struct inode * inode = dentry->d_inode;
281 struct super_block * sb;
282 int ret;
284 /* sync the inode to buffers */
285 write_inode_now(inode, 0);
287 /* sync the superblock to buffers */
288 sb = inode->i_sb;
289 lock_super(sb);
290 if (sb->s_op->write_super)
291 sb->s_op->write_super(sb);
292 unlock_super(sb);
294 /* .. finally sync the buffers to disk */
295 ret = sync_blockdev(sb->s_bdev);
296 return ret;
299 asmlinkage long sys_fsync(unsigned int fd)
301 struct file * file;
302 struct dentry * dentry;
303 struct inode * inode;
304 int ret, err;
306 ret = -EBADF;
307 file = fget(fd);
308 if (!file)
309 goto out;
311 dentry = file->f_dentry;
312 inode = dentry->d_inode;
314 ret = -EINVAL;
315 if (!file->f_op || !file->f_op->fsync) {
316 /* Why? We can still call filemap_fdatawrite */
317 goto out_putf;
320 /* We need to protect against concurrent writers.. */
321 down(&inode->i_sem);
322 ret = filemap_fdatawrite(inode->i_mapping);
323 err = file->f_op->fsync(file, dentry, 0);
324 if (!ret)
325 ret = err;
326 err = filemap_fdatawait(inode->i_mapping);
327 if (!ret)
328 ret = err;
329 up(&inode->i_sem);
331 out_putf:
332 fput(file);
333 out:
334 return ret;
337 asmlinkage long sys_fdatasync(unsigned int fd)
339 struct file * file;
340 struct dentry * dentry;
341 struct inode * inode;
342 int ret, err;
344 ret = -EBADF;
345 file = fget(fd);
346 if (!file)
347 goto out;
349 dentry = file->f_dentry;
350 inode = dentry->d_inode;
352 ret = -EINVAL;
353 if (!file->f_op || !file->f_op->fsync)
354 goto out_putf;
356 down(&inode->i_sem);
357 ret = filemap_fdatawrite(inode->i_mapping);
358 err = file->f_op->fsync(file, dentry, 1);
359 if (!ret)
360 ret = err;
361 err = filemap_fdatawait(inode->i_mapping);
362 if (!ret)
363 ret = err;
364 up(&inode->i_sem);
366 out_putf:
367 fput(file);
368 out:
369 return ret;
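/*
 * Example (user-space sketch): how the two syscalls above differ from the
 * caller's point of view.  Both write back and wait upon the file's dirty
 * pages; fdatasync() passes datasync == 1 to ->fsync() so the filesystem
 * may skip flushing inode metadata that isn't needed to retrieve the data
 * (e.g. timestamps).  The path below is made up for illustration.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char buf[] = "hello, buffer cache\n";
	int fd = open("/tmp/fsync-demo", O_WRONLY | O_CREAT | O_TRUNC, 0644);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, buf, strlen(buf)) < 0) {
		perror("write");
		return 1;
	}
	if (fsync(fd) < 0)	/* data + metadata, waited upon */
		perror("fsync");
	if (fdatasync(fd) < 0)	/* data (and the metadata needed to reach it) */
		perror("fdatasync");
	return close(fd);
}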
373 * Various filesystems appear to want __find_get_block to be non-blocking.
374 * But it's the page lock which protects the buffers. To get around this,
375 * we get exclusion from try_to_free_buffers with the blockdev mapping's
376 * private_lock.
378 * Hack idea: for the blockdev mapping, private_lock contention
379 * may be quite high. This code could TryLock the page, and if that
380 * succeeds, there is no need to take private_lock. (But if
381 * private_lock is contended then so is mapping->page_lock).
383 static struct buffer_head *
384 __find_get_block_slow(struct block_device *bdev, sector_t block, int unused)
386 struct inode *bd_inode = bdev->bd_inode;
387 struct address_space *bd_mapping = bd_inode->i_mapping;
388 struct buffer_head *ret = NULL;
389 unsigned long index;
390 struct buffer_head *bh;
391 struct buffer_head *head;
392 struct page *page;
394 index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
395 page = find_get_page(bd_mapping, index);
396 if (!page)
397 goto out;
399 spin_lock(&bd_mapping->private_lock);
400 if (!page_has_buffers(page))
401 goto out_unlock;
402 head = page_buffers(page);
403 bh = head;
404 do {
405 if (bh->b_blocknr == block) {
406 ret = bh;
407 get_bh(bh);
408 goto out_unlock;
410 bh = bh->b_this_page;
411 } while (bh != head);
412 buffer_error();
413 out_unlock:
414 spin_unlock(&bd_mapping->private_lock);
415 page_cache_release(page);
416 out:
417 return ret;
420 /* If invalidate_buffers() will trash dirty buffers, it means some kind
421 of fs corruption is going on. Trashing dirty data always implies losing
422 information that was supposed to be just stored on the physical layer
423 by the user.
425 Thus invalidate_buffers in general usage is not allowed to trash
426 dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
427 be preserved. These buffers are simply skipped.
429 We also skip buffers which are still in use. For example this can
430 happen if a userspace program is reading the block device.
432 NOTE: In the case where the user removed a removable-media disk while there
433 was still dirty data not synced to disk (due to a bug in the device driver
434 or to a user error), not destroying the dirty buffers could also corrupt
435 the next media inserted; thus a parameter is necessary to handle this case
436 in the safest way possible (trying not to corrupt the newly inserted disk
437 with data belonging to the old, now corrupted one as well). Also for the
438 ramdisk the natural thing to do in order to release the ramdisk memory is
439 to destroy the dirty buffers.
441 These are two special cases. Normal usage implies that the device driver
442 issues a sync on the device (without waiting for I/O completion) and
443 then an invalidate_buffers call that doesn't trash dirty buffers.
445 For handling cache coherency with the blkdev pagecache the 'update' case
446 has been introduced. It is needed to re-read from disk any pinned
447 buffer. NOTE: re-reading from disk is destructive, so we can only do it
448 when we assume nobody is changing the buffercache under our I/O and when
449 we think the disk contains more recent information than the buffercache.
450 The update == 1 pass marks the buffers we need to update, the update == 2
451 pass does the actual I/O. */
452 void invalidate_bdev(struct block_device *bdev, int destroy_dirty_buffers)
454 invalidate_bh_lrus();
456 * FIXME: what about destroy_dirty_buffers?
457 * We really want to use invalidate_inode_pages2() for
458 * that, but not until that's cleaned up.
460 invalidate_inode_pages(bdev->bd_inode->i_mapping);
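/*
 * Example (user-space sketch): BLKFLSBUF is the ioctl referred to in the
 * big comment above.  The block layer typically implements it as
 * fsync_bdev() followed by invalidate_bdev(), so dirty data is written out
 * rather than trashed.  Requires CAP_SYS_ADMIN; the device path is made up
 * for illustration.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>		/* BLKFLSBUF */

int main(void)
{
	int fd = open("/dev/sda", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, BLKFLSBUF, 0) < 0)
		perror("ioctl(BLKFLSBUF)");
	return close(fd);
}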
464 * Kick pdflush then try to free up some ZONE_NORMAL memory.
466 static void free_more_memory(void)
468 struct zone *zone;
469 pg_data_t *pgdat;
471 wakeup_bdflush(1024);
472 blk_run_queues();
473 yield();
475 for_each_pgdat(pgdat) {
476 zone = pgdat->node_zonelists[GFP_NOFS&GFP_ZONEMASK].zones[0];
477 if (zone)
478 try_to_free_pages(zone, GFP_NOFS, 0);
483 * I/O completion handler for block_read_full_page() - pages
484 * which come unlocked at the end of I/O.
486 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
488 static spinlock_t page_uptodate_lock = SPIN_LOCK_UNLOCKED;
489 unsigned long flags;
490 struct buffer_head *tmp;
491 struct page *page;
492 int page_uptodate = 1;
494 BUG_ON(!buffer_async_read(bh));
496 page = bh->b_page;
497 if (uptodate) {
498 set_buffer_uptodate(bh);
499 } else {
500 clear_buffer_uptodate(bh);
501 buffer_io_error(bh);
502 SetPageError(page);
506 * Be _very_ careful from here on. Bad things can happen if
507 * two buffer heads end IO at almost the same time and both
508 * decide that the page is now completely done.
510 spin_lock_irqsave(&page_uptodate_lock, flags);
511 clear_buffer_async_read(bh);
512 unlock_buffer(bh);
513 tmp = bh;
514 do {
515 if (!buffer_uptodate(tmp))
516 page_uptodate = 0;
517 if (buffer_async_read(tmp)) {
518 BUG_ON(!buffer_locked(tmp));
519 goto still_busy;
521 tmp = tmp->b_this_page;
522 } while (tmp != bh);
523 spin_unlock_irqrestore(&page_uptodate_lock, flags);
526 * If none of the buffers had errors and they are all
527 * uptodate then we can set the page uptodate.
529 if (page_uptodate && !PageError(page))
530 SetPageUptodate(page);
531 unlock_page(page);
532 return;
534 still_busy:
535 spin_unlock_irqrestore(&page_uptodate_lock, flags);
536 return;
540 * Completion handler for block_write_full_page() - pages which are unlocked
541 * during I/O, and which have PageWriteback cleared upon I/O completion.
543 void end_buffer_async_write(struct buffer_head *bh, int uptodate)
545 static spinlock_t page_uptodate_lock = SPIN_LOCK_UNLOCKED;
546 unsigned long flags;
547 struct buffer_head *tmp;
548 struct page *page;
550 BUG_ON(!buffer_async_write(bh));
552 page = bh->b_page;
553 if (uptodate) {
554 set_buffer_uptodate(bh);
555 } else {
556 buffer_io_error(bh);
557 clear_buffer_uptodate(bh);
558 SetPageError(page);
561 spin_lock_irqsave(&page_uptodate_lock, flags);
562 clear_buffer_async_write(bh);
563 unlock_buffer(bh);
564 tmp = bh->b_this_page;
565 while (tmp != bh) {
566 if (buffer_async_write(tmp)) {
567 BUG_ON(!buffer_locked(tmp));
568 goto still_busy;
570 tmp = tmp->b_this_page;
572 spin_unlock_irqrestore(&page_uptodate_lock, flags);
573 end_page_writeback(page);
574 return;
576 still_busy:
577 spin_unlock_irqrestore(&page_uptodate_lock, flags);
578 return;
582 * If a page's buffers are under async read-in (end_buffer_async_read
583 * completion) then there is a possibility that another thread of
584 * control could lock one of the buffers after it has completed
585 * but while some of the other buffers have not completed. This
586 * locked buffer would confuse end_buffer_async_read() into not unlocking
587 * the page. So the absence of BH_Async_Read tells end_buffer_async_read()
588 * that this buffer is not under async I/O.
590 * The page comes unlocked when it has no locked buffer_async buffers
591 * left.
593 * PageLocked prevents anyone starting new async I/O reads any of
594 * the buffers.
596 * PageWriteback is used to prevent simultaneous writeout of the same
597 * page.
599 * PageLocked prevents anyone from starting writeback of a page which is
600 * under read I/O (PageWriteback is only ever set against a locked page).
602 void mark_buffer_async_read(struct buffer_head *bh)
604 bh->b_end_io = end_buffer_async_read;
605 set_buffer_async_read(bh);
607 EXPORT_SYMBOL(mark_buffer_async_read);
609 void mark_buffer_async_write(struct buffer_head *bh)
611 bh->b_end_io = end_buffer_async_write;
612 set_buffer_async_write(bh);
614 EXPORT_SYMBOL(mark_buffer_async_write);
618 * fs/buffer.c contains helper functions for buffer-backed address space's
619 * fsync functions. A common requirement for buffer-based filesystems is
620 * that certain data from the backing blockdev needs to be written out for
621 * a successful fsync(). For example, ext2 indirect blocks need to be
622 * written back and waited upon before fsync() returns.
624 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
625 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
626 * management of a list of dependent buffers at ->i_mapping->private_list.
628 * Locking is a little subtle: try_to_free_buffers() will remove buffers
629 * from their controlling inode's queue when they are being freed. But
630 * try_to_free_buffers() will be operating against the *blockdev* mapping
631 * at the time, not against the S_ISREG file which depends on those buffers.
632 * So the locking for private_list is via the private_lock in the address_space
633 * which backs the buffers. Which is different from the address_space
634 * against which the buffers are listed. So for a particular address_space,
635 * mapping->private_lock does *not* protect mapping->private_list! In fact,
636 * mapping->private_list will always be protected by the backing blockdev's
637 * ->private_lock.
639 * Which introduces a requirement: all buffers on an address_space's
640 * ->private_list must be from the same address_space: the blockdev's.
642 * address_spaces which do not place buffers at ->private_list via these
643 * utility functions are free to use private_lock and private_list for
644 * whatever they want. The only requirement is that list_empty(private_list)
645 * be true at clear_inode() time.
647 * FIXME: clear_inode should not call invalidate_inode_buffers(). The
648 * filesystems should do that. invalidate_inode_buffers() should just go
649 * BUG_ON(!list_empty).
651 * FIXME: mark_buffer_dirty_inode() is a data-plane operation. It should
652 * take an address_space, not an inode. And it should be called
653 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
654 * queued up.
656 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
657 * list if it is already on a list. Because if the buffer is on a list,
658 * it *must* already be on the right one. If not, the filesystem is being
659 * silly. This will save a ton of locking. But first we have to ensure
660 * that buffers are taken *off* the old inode's list when they are freed
661 * (presumably in truncate). That requires careful auditing of all
662 * filesystems (do it inside bforget()). It could also be done by bringing
663 * b_inode back.
666 void buffer_insert_list(spinlock_t *lock,
667 struct buffer_head *bh, struct list_head *list)
669 spin_lock(lock);
670 list_move_tail(&bh->b_assoc_buffers, list);
671 spin_unlock(lock);
675 * The buffer's backing address_space's private_lock must be held
677 static inline void __remove_assoc_queue(struct buffer_head *bh)
679 list_del_init(&bh->b_assoc_buffers);
682 int inode_has_buffers(struct inode *inode)
684 return !list_empty(&inode->i_data.private_list);
688 * osync is designed to support O_SYNC io. It waits synchronously for
689 * all already-submitted IO to complete, but does not queue any new
690 * writes to the disk.
692 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
693 * you dirty the buffers, and then use osync_inode_buffers to wait for
694 * completion. Any other dirty buffers which are not yet queued for
695 * write will not be flushed to disk by the osync.
697 static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
699 struct buffer_head *bh;
700 struct list_head *p;
701 int err = 0;
703 spin_lock(lock);
704 repeat:
705 list_for_each_prev(p, list) {
706 bh = BH_ENTRY(p);
707 if (buffer_locked(bh)) {
708 get_bh(bh);
709 spin_unlock(lock);
710 wait_on_buffer(bh);
711 if (!buffer_uptodate(bh))
712 err = -EIO;
713 brelse(bh);
714 spin_lock(lock);
715 goto repeat;
718 spin_unlock(lock);
719 return err;
723 * sync_mapping_buffers - write out and wait upon a mapping's "associated"
724 * buffers
725 * @buffer_mapping - the mapping which backs the buffers' data
726 * @mapping - the mapping which wants those buffers written
728 * Starts I/O against the buffers at mapping->private_list, and waits upon
729 * that I/O.
731 * Basically, this is a convenience function for fsync(). @buffer_mapping is
732 * the blockdev which "owns" the buffers and @mapping is a file or directory
733 * which needs those buffers to be written for a successful fsync().
735 int sync_mapping_buffers(struct address_space *mapping)
737 struct address_space *buffer_mapping = mapping->assoc_mapping;
739 if (buffer_mapping == NULL || list_empty(&mapping->private_list))
740 return 0;
742 return fsync_buffers_list(&buffer_mapping->private_lock,
743 &mapping->private_list);
745 EXPORT_SYMBOL(sync_mapping_buffers);
748 * Called when we've recently written block `bblock', and it is known that
749 * `bblock' was for a buffer_boundary() buffer. This means that the block at
750 * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's
751 * dirty, schedule it for IO. So that indirects merge nicely with their data.
753 void write_boundary_block(struct block_device *bdev,
754 sector_t bblock, unsigned blocksize)
756 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
757 if (bh) {
758 if (buffer_dirty(bh))
759 ll_rw_block(WRITE, 1, &bh);
760 put_bh(bh);
764 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
766 struct address_space *mapping = inode->i_mapping;
767 struct address_space *buffer_mapping = bh->b_page->mapping;
769 mark_buffer_dirty(bh);
770 if (!mapping->assoc_mapping) {
771 mapping->assoc_mapping = buffer_mapping;
772 } else {
773 if (mapping->assoc_mapping != buffer_mapping)
774 BUG();
776 if (list_empty(&bh->b_assoc_buffers))
777 buffer_insert_list(&buffer_mapping->private_lock,
778 bh, &mapping->private_list);
780 EXPORT_SYMBOL(mark_buffer_dirty_inode);
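/*
 * Example (illustrative sketch, assuming an ext2-like filesystem): how the
 * helpers above are meant to be used together.  A metadata buffer (say an
 * indirect block) is queued on the inode's ->private_list when it is
 * dirtied, and the filesystem's fsync path later writes and waits on that
 * list.  The example_* names are hypothetical; mark_buffer_dirty_inode(),
 * sync_mapping_buffers(), sb_bread() and brelse() are the real interfaces.
 */
static int example_update_indirect(struct inode *inode, sector_t blocknr,
				   int index, u32 new_block)
{
	struct buffer_head *bh = sb_bread(inode->i_sb, blocknr);

	if (!bh)
		return -EIO;
	((u32 *)bh->b_data)[index] = cpu_to_le32(new_block);
	/* Dirty it and hang it off inode->i_mapping->private_list */
	mark_buffer_dirty_inode(bh, inode);
	brelse(bh);
	return 0;
}

static int example_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	/* Write out and wait upon the buffers queued above */
	return sync_mapping_buffers(dentry->d_inode->i_mapping);
}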
783 * Add a page to the dirty page list.
785 * It is a sad fact of life that this function is called from several places
786 * deeply under spinlocking. It may not sleep.
788 * If the page has buffers, the uptodate buffers are set dirty, to preserve
789 * dirty-state coherency between the page and the buffers. If the page does
790 * not have buffers then, when they are later attached, they will all be set
791 * dirty.
793 * The buffers are dirtied before the page is dirtied. There's a small race
794 * window in which a writepage caller may see the page cleanness but not the
795 * buffer dirtiness. That's fine. If this code were to set the page dirty
796 * before the buffers, a concurrent writepage caller could clear the page dirty
797 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
798 * page on the dirty page list.
800 * There is also a small window where the page is dirty, and not on dirty_pages.
801 * Also a possibility that by the time the page is added to dirty_pages, it has
802 * been set clean. The page lists are somewhat approximate in this regard.
803 * It's better to have clean pages accidentally attached to dirty_pages than to
804 * leave dirty pages attached to clean_pages.
806 * We use private_lock to lock against try_to_free_buffers while using the
807 * page's buffer list. Also use this to protect against clean buffers being
808 * added to the page after it was set dirty.
810 * FIXME: may need to call ->reservepage here as well. That's rather up to the
811 * address_space though.
813 * For now, we treat swapper_space specially. It doesn't use the normal
814 * block a_ops.
816 int __set_page_dirty_buffers(struct page *page)
818 struct address_space * const mapping = page->mapping;
819 int ret = 0;
821 if (mapping == NULL) {
822 SetPageDirty(page);
823 goto out;
826 spin_lock(&mapping->private_lock);
827 if (page_has_buffers(page)) {
828 struct buffer_head *head = page_buffers(page);
829 struct buffer_head *bh = head;
831 do {
832 if (buffer_uptodate(bh))
833 set_buffer_dirty(bh);
834 else
835 buffer_error();
836 bh = bh->b_this_page;
837 } while (bh != head);
839 spin_unlock(&mapping->private_lock);
841 if (!TestSetPageDirty(page)) {
842 spin_lock(&mapping->page_lock);
843 if (page->mapping) { /* Race with truncate? */
844 if (!mapping->backing_dev_info->memory_backed)
845 inc_page_state(nr_dirty);
846 list_del(&page->list);
847 list_add(&page->list, &mapping->dirty_pages);
849 spin_unlock(&mapping->page_lock);
850 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
853 out:
854 return ret;
856 EXPORT_SYMBOL(__set_page_dirty_buffers);
859 * Write out and wait upon a list of buffers.
861 * We have conflicting pressures: we want to make sure that all
862 * initially dirty buffers get waited on, but that any subsequently
863 * dirtied buffers don't. After all, we don't want fsync to last
864 * forever if somebody is actively writing to the file.
866 * Do this in two main stages: first we copy dirty buffers to a
867 * temporary inode list, queueing the writes as we go. Then we clean
868 * up, waiting for those writes to complete.
870 * During this second stage, any subsequent updates to the file may end
871 * up refiling the buffer on the original inode's dirty list again, so
872 * there is a chance we will end up with a buffer queued for write but
873 * not yet completed on that list. So, as a final cleanup we go through
874 * the osync code to catch these locked, dirty buffers without requeuing
875 * any newly dirty buffers for write.
877 int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
879 struct buffer_head *bh;
880 struct list_head tmp;
881 int err = 0, err2;
883 INIT_LIST_HEAD(&tmp);
885 spin_lock(lock);
886 while (!list_empty(list)) {
887 bh = BH_ENTRY(list->next);
888 list_del_init(&bh->b_assoc_buffers);
889 if (buffer_dirty(bh) || buffer_locked(bh)) {
890 list_add(&bh->b_assoc_buffers, &tmp);
891 if (buffer_dirty(bh)) {
892 get_bh(bh);
893 spin_unlock(lock);
895 * Ensure any pending I/O completes so that
896 * ll_rw_block() actually writes the current
897 * contents - it is a noop if I/O is still in
898 * flight on potentially older contents.
900 wait_on_buffer(bh);
901 ll_rw_block(WRITE, 1, &bh);
902 brelse(bh);
903 spin_lock(lock);
908 while (!list_empty(&tmp)) {
909 bh = BH_ENTRY(tmp.prev);
910 __remove_assoc_queue(bh);
911 get_bh(bh);
912 spin_unlock(lock);
913 wait_on_buffer(bh);
914 if (!buffer_uptodate(bh))
915 err = -EIO;
916 brelse(bh);
917 spin_lock(lock);
920 spin_unlock(lock);
921 err2 = osync_buffers_list(lock, list);
922 if (err)
923 return err;
924 else
925 return err2;
929 * Invalidate any and all dirty buffers on a given inode. We are
930 * probably unmounting the fs, but that doesn't mean we have already
931 * done a sync(). Just drop the buffers from the inode list.
933 * NOTE: we take the inode's blockdev's mapping's private_lock. Which
934 * assumes that all the buffers are against the blockdev. Not true
935 * for reiserfs.
937 void invalidate_inode_buffers(struct inode *inode)
939 if (inode_has_buffers(inode)) {
940 struct address_space *mapping = &inode->i_data;
941 struct list_head *list = &mapping->private_list;
942 struct address_space *buffer_mapping = mapping->assoc_mapping;
944 spin_lock(&buffer_mapping->private_lock);
945 while (!list_empty(list))
946 __remove_assoc_queue(BH_ENTRY(list->next));
947 spin_unlock(&buffer_mapping->private_lock);
952 * Remove any clean buffers from the inode's buffer list. This is called
953 * when we're trying to free the inode itself. Those buffers can pin it.
955 * Returns true if all buffers were removed.
957 int remove_inode_buffers(struct inode *inode)
959 int ret = 1;
961 if (inode_has_buffers(inode)) {
962 struct address_space *mapping = &inode->i_data;
963 struct list_head *list = &mapping->private_list;
964 struct address_space *buffer_mapping = mapping->assoc_mapping;
966 spin_lock(&buffer_mapping->private_lock);
967 while (!list_empty(list)) {
968 struct buffer_head *bh = BH_ENTRY(list->next);
969 if (buffer_dirty(bh)) {
970 ret = 0;
971 break;
973 __remove_assoc_queue(bh);
975 spin_unlock(&buffer_mapping->private_lock);
977 return ret;
981 * Create the appropriate buffers when given a page for a data area and
982 * the size of each buffer. Use the bh->b_this_page linked list to
983 * follow the buffers created. Return NULL if unable to create more
984 * buffers.
986 * The retry flag is used to differentiate async IO (paging, swapping),
987 * which must not fail, from ordinary buffer allocations.
989 static struct buffer_head *
990 create_buffers(struct page * page, unsigned long size, int retry)
992 struct buffer_head *bh, *head;
993 long offset;
995 try_again:
996 head = NULL;
997 offset = PAGE_SIZE;
998 while ((offset -= size) >= 0) {
999 bh = alloc_buffer_head(GFP_NOFS);
1000 if (!bh)
1001 goto no_grow;
1003 bh->b_bdev = NULL;
1004 bh->b_this_page = head;
1005 bh->b_blocknr = -1;
1006 head = bh;
1008 bh->b_state = 0;
1009 atomic_set(&bh->b_count, 0);
1010 bh->b_size = size;
1012 /* Link the buffer to its page */
1013 set_bh_page(bh, page, offset);
1015 bh->b_end_io = NULL;
1017 return head;
1019 * In case anything failed, we just free everything we got.
1021 no_grow:
1022 if (head) {
1023 do {
1024 bh = head;
1025 head = head->b_this_page;
1026 free_buffer_head(bh);
1027 } while (head);
1031 * Return failure for non-async IO requests. Async IO requests
1032 * are not allowed to fail, so we have to wait until buffer heads
1033 * become available. But we don't want tasks sleeping with
1034 * partially complete buffers, so all were released above.
1036 if (!retry)
1037 return NULL;
1039 /* We're _really_ low on memory. Now we just
1040 * wait for old buffer heads to become free due to
1041 * finishing IO. Since this is an async request and
1042 * the reserve list is empty, we're sure there are
1043 * async buffer heads in use.
1045 free_more_memory();
1046 goto try_again;
1049 static inline void
1050 link_dev_buffers(struct page *page, struct buffer_head *head)
1052 struct buffer_head *bh, *tail;
1054 bh = head;
1055 do {
1056 tail = bh;
1057 bh = bh->b_this_page;
1058 } while (bh);
1059 tail->b_this_page = head;
1060 __set_page_buffers(page, head);
1064 * Initialise the state of a blockdev page's buffers.
1066 static void
1067 init_page_buffers(struct page *page, struct block_device *bdev,
1068 int block, int size)
1070 struct buffer_head *head = page_buffers(page);
1071 struct buffer_head *bh = head;
1072 unsigned int b_state;
1074 b_state = 1 << BH_Mapped;
1075 if (PageUptodate(page))
1076 b_state |= 1 << BH_Uptodate;
1078 do {
1079 if (!(bh->b_state & (1 << BH_Mapped))) {
1080 init_buffer(bh, NULL, NULL);
1081 bh->b_bdev = bdev;
1082 bh->b_blocknr = block;
1083 bh->b_state = b_state;
1085 block++;
1086 bh = bh->b_this_page;
1087 } while (bh != head);
1091 * Create the page-cache page that contains the requested block.
1093 * This is used purely for blockdev mappings.
1095 static struct page *
1096 grow_dev_page(struct block_device *bdev, unsigned long block,
1097 unsigned long index, int size)
1099 struct inode *inode = bdev->bd_inode;
1100 struct page *page;
1101 struct buffer_head *bh;
1103 page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
1104 if (!page)
1105 return NULL;
1107 if (!PageLocked(page))
1108 BUG();
1110 if (page_has_buffers(page)) {
1111 bh = page_buffers(page);
1112 if (bh->b_size == size)
1113 return page;
1114 if (!try_to_free_buffers(page))
1115 goto failed;
1119 * Allocate some buffers for this page
1121 bh = create_buffers(page, size, 0);
1122 if (!bh)
1123 goto failed;
1126 * Link the page to the buffers and initialise them. Take the
1127 * lock to be atomic wrt __find_get_block(), which does not
1128 * run under the page lock.
1130 spin_lock(&inode->i_mapping->private_lock);
1131 link_dev_buffers(page, bh);
1132 init_page_buffers(page, bdev, block, size);
1133 spin_unlock(&inode->i_mapping->private_lock);
1134 return page;
1136 failed:
1137 buffer_error();
1138 unlock_page(page);
1139 page_cache_release(page);
1140 return NULL;
1144 * Create buffers for the specified block device block's page. If
1145 * that page was dirty, the buffers are set dirty also.
1147 * Except that's a bug. Attaching dirty buffers to a dirty
1148 * blockdev's page can result in filesystem corruption, because
1149 * some of those buffers may be aliases of filesystem data.
1150 * grow_dev_page() will go BUG() if this happens.
1152 static inline int
1153 grow_buffers(struct block_device *bdev, unsigned long block, int size)
1155 struct page *page;
1156 unsigned long index;
1157 int sizebits;
1159 /* Size must be multiple of hard sectorsize */
1160 if (size & (bdev_hardsect_size(bdev)-1))
1161 BUG();
1162 if (size < 512 || size > PAGE_SIZE)
1163 BUG();
1165 sizebits = -1;
1166 do {
1167 sizebits++;
1168 } while ((size << sizebits) < PAGE_SIZE);
1170 index = block >> sizebits;
1171 block = index << sizebits;
1173 /* Create a page with the proper size buffers.. */
1174 page = grow_dev_page(bdev, block, index, size);
1175 if (!page)
1176 return 0;
1177 unlock_page(page);
1178 page_cache_release(page);
1179 return 1;
1182 struct buffer_head *
1183 __getblk_slow(struct block_device *bdev, sector_t block, int size)
1185 for (;;) {
1186 struct buffer_head * bh;
1188 bh = __find_get_block(bdev, block, size);
1189 if (bh)
1190 return bh;
1192 if (!grow_buffers(bdev, block, size))
1193 free_more_memory();
1198 * The relationship between dirty buffers and dirty pages:
1200 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1201 * the page appears on its address_space.dirty_pages list.
1203 * At all times, the dirtiness of the buffers represents the dirtiness of
1204 * subsections of the page. If the page has buffers, the page dirty bit is
1205 * merely a hint about the true dirty state.
1207 * When a page is set dirty in its entirety, all its buffers are marked dirty
1208 * (if the page has buffers).
1210 * When a buffer is marked dirty, its page is dirtied, but the page's other
1211 * buffers are not.
1213 * Also. When blockdev buffers are explicitly read with bread(), they
1214 * individually become uptodate. But their backing page remains not
1215 * uptodate - even if all of its buffers are uptodate. A subsequent
1216 * block_read_full_page() against that page will discover all the uptodate
1217 * buffers, will set the page uptodate and will perform no I/O.
1221 * mark_buffer_dirty - mark a buffer_head as needing writeout
1223 * mark_buffer_dirty() will set the dirty bit against the buffer,
1224 * then set its backing page dirty, then attach the page to its
1225 * address_space's dirty_pages list and then attach the address_space's
1226 * inode to its superblock's dirty inode list.
1228 * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
1229 * mapping->page_lock and the global inode_lock.
1231 void mark_buffer_dirty(struct buffer_head *bh)
1233 if (!buffer_uptodate(bh))
1234 buffer_error();
1235 if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
1236 __set_page_dirty_nobuffers(bh->b_page);
1240 * Decrement a buffer_head's reference count. If all buffers against a page
1241 * have zero reference count, are clean and unlocked, and if the page is clean
1242 * and unlocked then try_to_free_buffers() may strip the buffers from the page
1243 * in preparation for freeing it (sometimes, rarely, buffers are removed from
1244 * a page but it ends up not being freed, and buffers may later be reattached).
1246 void __brelse(struct buffer_head * buf)
1248 if (atomic_read(&buf->b_count)) {
1249 put_bh(buf);
1250 return;
1252 printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1253 buffer_error(); /* For the stack backtrace */
1257 * bforget() is like brelse(), except it discards any
1258 * potentially dirty data.
1260 void __bforget(struct buffer_head *bh)
1262 clear_buffer_dirty(bh);
1263 if (!list_empty(&bh->b_assoc_buffers)) {
1264 struct address_space *buffer_mapping = bh->b_page->mapping;
1266 spin_lock(&buffer_mapping->private_lock);
1267 list_del_init(&bh->b_assoc_buffers);
1268 spin_unlock(&buffer_mapping->private_lock);
1270 __brelse(bh);
1273 static struct buffer_head *__bread_slow(struct buffer_head *bh)
1275 lock_buffer(bh);
1276 if (buffer_uptodate(bh)) {
1277 unlock_buffer(bh);
1278 return bh;
1279 } else {
1280 if (buffer_dirty(bh))
1281 buffer_error();
1282 get_bh(bh);
1283 bh->b_end_io = end_buffer_io_sync;
1284 submit_bh(READ, bh);
1285 wait_on_buffer(bh);
1286 if (buffer_uptodate(bh))
1287 return bh;
1289 brelse(bh);
1290 return NULL;
1294 * Per-cpu buffer LRU implementation, to reduce the cost of __find_get_block().
1295 * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their
1296 * refcount elevated by one when they're in an LRU. A buffer can only appear
1297 * once in a particular CPU's LRU. A single buffer can be present in multiple
1298 * CPU's LRUs at the same time.
1300 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1301 * sb_find_get_block().
1303 * The LRUs themselves only need locking against invalidate_bh_lrus. We use
1304 * a local interrupt disable for that.
1307 #define BH_LRU_SIZE 8
1309 struct bh_lru {
1310 struct buffer_head *bhs[BH_LRU_SIZE];
1313 static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{0}};
1315 #ifdef CONFIG_SMP
1316 #define bh_lru_lock() local_irq_disable()
1317 #define bh_lru_unlock() local_irq_enable()
1318 #else
1319 #define bh_lru_lock() preempt_disable()
1320 #define bh_lru_unlock() preempt_enable()
1321 #endif
1323 static inline void check_irqs_on(void)
1325 #ifdef irqs_disabled
1326 BUG_ON(irqs_disabled());
1327 #endif
1331 * The LRU management algorithm is dopey-but-simple. Sorry.
1333 static void bh_lru_install(struct buffer_head *bh)
1335 struct buffer_head *evictee = NULL;
1336 struct bh_lru *lru;
1338 check_irqs_on();
1339 bh_lru_lock();
1340 lru = &__get_cpu_var(bh_lrus);
1341 if (lru->bhs[0] != bh) {
1342 struct buffer_head *bhs[BH_LRU_SIZE];
1343 int in;
1344 int out = 0;
1346 get_bh(bh);
1347 bhs[out++] = bh;
1348 for (in = 0; in < BH_LRU_SIZE; in++) {
1349 struct buffer_head *bh2 = lru->bhs[in];
1351 if (bh2 == bh) {
1352 __brelse(bh2);
1353 } else {
1354 if (out >= BH_LRU_SIZE) {
1355 BUG_ON(evictee != NULL);
1356 evictee = bh2;
1357 } else {
1358 bhs[out++] = bh2;
1362 while (out < BH_LRU_SIZE)
1363 bhs[out++] = NULL;
1364 memcpy(lru->bhs, bhs, sizeof(bhs));
1366 bh_lru_unlock();
1368 if (evictee)
1369 __brelse(evictee);
1373 * Look up the bh in this cpu's LRU. If it's there, move it to the head.
1375 static inline struct buffer_head *
1376 lookup_bh_lru(struct block_device *bdev, sector_t block, int size)
1378 struct buffer_head *ret = NULL;
1379 struct bh_lru *lru;
1380 int i;
1382 check_irqs_on();
1383 bh_lru_lock();
1384 lru = &__get_cpu_var(bh_lrus);
1385 for (i = 0; i < BH_LRU_SIZE; i++) {
1386 struct buffer_head *bh = lru->bhs[i];
1388 if (bh && bh->b_bdev == bdev &&
1389 bh->b_blocknr == block && bh->b_size == size) {
1390 if (i) {
1391 while (i) {
1392 lru->bhs[i] = lru->bhs[i - 1];
1393 i--;
1395 lru->bhs[0] = bh;
1397 get_bh(bh);
1398 ret = bh;
1399 break;
1402 bh_lru_unlock();
1403 return ret;
1407 * Perform a pagecache lookup for the matching buffer. If it's there, refresh
1408 * it in the LRU and mark it as accessed. If it is not present then return
1409 * NULL
1411 struct buffer_head *
1412 __find_get_block(struct block_device *bdev, sector_t block, int size)
1414 struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1416 if (bh == NULL) {
1417 bh = __find_get_block_slow(bdev, block, size);
1418 if (bh)
1419 bh_lru_install(bh);
1421 if (bh)
1422 touch_buffer(bh);
1423 return bh;
1425 EXPORT_SYMBOL(__find_get_block);
1428 * __getblk will locate (and, if necessary, create) the buffer_head
1429 * which corresponds to the passed block_device, block and size. The
1430 * returned buffer has its reference count incremented.
1432 * __getblk() cannot fail - it just keeps trying. If you pass it an
1433 * illegal block number, __getblk() will happily return a buffer_head
1434 * which represents the non-existent block. Very weird.
1436 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1437 * attempt is failing. FIXME, perhaps?
1439 struct buffer_head *
1440 __getblk(struct block_device *bdev, sector_t block, int size)
1442 struct buffer_head *bh = __find_get_block(bdev, block, size);
1444 if (bh == NULL)
1445 bh = __getblk_slow(bdev, block, size);
1446 return bh;
1448 EXPORT_SYMBOL(__getblk);
1451 * __bread() - reads a specified block and returns the bh
1452 * @block: number of block
1453 * @size: size (in bytes) to read
1455 * Reads a specified block, and returns buffer head that contains it.
1456 * It returns NULL if the block was unreadable.
1458 struct buffer_head *
1459 __bread(struct block_device *bdev, sector_t block, int size)
1461 struct buffer_head *bh = __getblk(bdev, block, size);
1463 if (!buffer_uptodate(bh))
1464 bh = __bread_slow(bh);
1465 return bh;
1467 EXPORT_SYMBOL(__bread);
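/*
 * Example (illustrative sketch): the usual way a filesystem reads a
 * metadata block through the lookup/read path above.  sb_bread() is the
 * thin wrapper in <linux/buffer_head.h> that calls __bread() with the
 * superblock's block device and block size; example_read_block is a
 * hypothetical caller.
 */
static void example_read_block(struct super_block *sb, sector_t block)
{
	struct buffer_head *bh = sb_bread(sb, block);	/* -> __bread(sb->s_bdev, ...) */

	if (!bh)
		return;		/* the block was unreadable */
	/* bh->b_data is uptodate here; use it, then drop the reference */
	brelse(bh);
}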
1470 * invalidate_bh_lrus() is called rarely - at unmount. Because it is only for
1471 * unmount it only needs to ensure that all buffers from the target device are
1472 * invalidated on return and it doesn't need to worry about new buffers from
1473 * that device being added - the unmount code has to prevent that.
1475 static void invalidate_bh_lru(void *arg)
1477 struct bh_lru *b = &get_cpu_var(bh_lrus);
1478 int i;
1480 for (i = 0; i < BH_LRU_SIZE; i++) {
1481 brelse(b->bhs[i]);
1482 b->bhs[i] = NULL;
1484 put_cpu_var(bh_lrus);
1487 static void invalidate_bh_lrus(void)
1489 on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
1492 void set_bh_page(struct buffer_head *bh,
1493 struct page *page, unsigned long offset)
1495 bh->b_page = page;
1496 if (offset >= PAGE_SIZE)
1497 BUG();
1498 if (PageHighMem(page))
1500 * This catches illegal uses and preserves the offset:
1502 bh->b_data = (char *)(0 + offset);
1503 else
1504 bh->b_data = page_address(page) + offset;
1506 EXPORT_SYMBOL(set_bh_page);
1509 * Called when truncating a buffer on a page completely.
1511 static inline void discard_buffer(struct buffer_head * bh)
1513 lock_buffer(bh);
1514 clear_buffer_dirty(bh);
1515 bh->b_bdev = NULL;
1516 clear_buffer_mapped(bh);
1517 clear_buffer_req(bh);
1518 clear_buffer_new(bh);
1519 clear_buffer_delay(bh);
1520 unlock_buffer(bh);
1524 * try_to_release_page() - release old fs-specific metadata on a page
1526 * @page: the page which the kernel is trying to free
1527 * @gfp_mask: memory allocation flags (and I/O mode)
1529 * The address_space is asked to try to release any data against the page
1530 * (presumably at page->private). If the release was successful, return `1'.
1531 * Otherwise return zero.
1533 * The @gfp_mask argument specifies whether I/O may be performed to release
1534 * this page (__GFP_IO), and whether the call may block (__GFP_WAIT).
1536 * NOTE: @gfp_mask may go away, and this function may become non-blocking.
1538 int try_to_release_page(struct page *page, int gfp_mask)
1540 struct address_space * const mapping = page->mapping;
1542 if (!PageLocked(page))
1543 BUG();
1544 if (PageWriteback(page))
1545 return 0;
1547 if (mapping && mapping->a_ops->releasepage)
1548 return mapping->a_ops->releasepage(page, gfp_mask);
1549 return try_to_free_buffers(page);
1553 * block_invalidatepage - invalidate part or all of a buffer-backed page
1555 * @page: the page which is affected
1556 * @offset: the index of the truncation point
1558 * block_invalidatepage() is called when all or part of the page has become
1559 * invalidated by a truncate operation.
1561 * block_invalidatepage() does not have to release all buffers, but it must
1562 * ensure that no dirty buffer is left outside @offset and that no I/O
1563 * is underway against any of the blocks which are outside the truncation
1564 * point. Because the caller is about to free (and possibly reuse) those
1565 * blocks on-disk.
1567 int block_invalidatepage(struct page *page, unsigned long offset)
1569 struct buffer_head *head, *bh, *next;
1570 unsigned int curr_off = 0;
1571 int ret = 1;
1573 BUG_ON(!PageLocked(page));
1574 if (!page_has_buffers(page))
1575 goto out;
1577 head = page_buffers(page);
1578 bh = head;
1579 do {
1580 unsigned int next_off = curr_off + bh->b_size;
1581 next = bh->b_this_page;
1584 * is this block fully invalidated?
1586 if (offset <= curr_off)
1587 discard_buffer(bh);
1588 curr_off = next_off;
1589 bh = next;
1590 } while (bh != head);
1593 * We release buffers only if the entire page is being invalidated.
1594 * The get_block cached value has been unconditionally invalidated,
1595 * so real IO is not possible anymore.
1597 if (offset == 0)
1598 ret = try_to_release_page(page, 0);
1599 out:
1600 return ret;
1602 EXPORT_SYMBOL(block_invalidatepage);
1605 * We attach and possibly dirty the buffers atomically wrt
1606 * __set_page_dirty_buffers() via private_lock. try_to_free_buffers
1607 * is already excluded via the page lock.
1609 void create_empty_buffers(struct page *page,
1610 unsigned long blocksize, unsigned long b_state)
1612 struct buffer_head *bh, *head, *tail;
1614 head = create_buffers(page, blocksize, 1);
1615 bh = head;
1616 do {
1617 bh->b_state |= b_state;
1618 tail = bh;
1619 bh = bh->b_this_page;
1620 } while (bh);
1621 tail->b_this_page = head;
1623 spin_lock(&page->mapping->private_lock);
1624 if (PageUptodate(page) || PageDirty(page)) {
1625 bh = head;
1626 do {
1627 if (PageDirty(page))
1628 set_buffer_dirty(bh);
1629 if (PageUptodate(page))
1630 set_buffer_uptodate(bh);
1631 bh = bh->b_this_page;
1632 } while (bh != head);
1634 __set_page_buffers(page, head);
1635 spin_unlock(&page->mapping->private_lock);
1637 EXPORT_SYMBOL(create_empty_buffers);
1640 * We are taking a block for data and we don't want any output from any
1641 * buffer-cache aliases starting from return from that function and
1642 * until the moment when something will explicitly mark the buffer
1643 * dirty (hopefully that will not happen until we free that block ;-)
1644 * We don't even need to mark it not-uptodate - nobody can expect
1645 * anything from a newly allocated buffer anyway. We used to use
1646 * unmap_buffer() for such invalidation, but that was wrong. We definitely
1647 * don't want to mark the alias unmapped, for example - it would confuse
1648 * anyone who might pick it with bread() afterwards...
1650 * Also.. Note that bforget() doesn't lock the buffer. So there can
1651 * be writeout I/O going on against recently-freed buffers. We don't
1652 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1653 * only if we really need to. That happens here.
1655 void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1657 struct buffer_head *old_bh;
1659 old_bh = __find_get_block_slow(bdev, block, 0);
1660 if (old_bh) {
1661 #if 0 /* This happens. Later. */
1662 if (buffer_dirty(old_bh))
1663 buffer_error();
1664 #endif
1665 clear_buffer_dirty(old_bh);
1666 wait_on_buffer(old_bh);
1667 clear_buffer_req(old_bh);
1668 __brelse(old_bh);
1671 EXPORT_SYMBOL(unmap_underlying_metadata);
1674 * NOTE! All mapped/uptodate combinations are valid:
1676 * Mapped Uptodate Meaning
1678 * No No "unknown" - must do get_block()
1679 * No Yes "hole" - zero-filled
1680 * Yes No "allocated" - allocated on disk, not read in
1681 * Yes Yes "valid" - allocated and up-to-date in memory.
1683 * "Dirty" is valid only with the last case (mapped+uptodate).
1687 * While block_write_full_page is writing back the dirty buffers under
1688 * the page lock, whoever dirtied the buffers may decide to clean them
1689 * again at any time. We handle that by only looking at the buffer
1690 * state inside lock_buffer().
1692 * If block_write_full_page() is called for regular writeback
1693 * (called_for_sync() is false) then it will redirty a page which has a locked
1694 * buffer. This only can happen if someone has written the buffer directly,
1695 * with submit_bh(). At the address_space level PageWriteback prevents this
1696 * contention from occurring.
1698 static int __block_write_full_page(struct inode *inode, struct page *page,
1699 get_block_t *get_block, struct writeback_control *wbc)
1701 int err;
1702 unsigned long block;
1703 unsigned long last_block;
1704 struct buffer_head *bh, *head;
1705 int nr_underway = 0;
1707 BUG_ON(!PageLocked(page));
1709 last_block = (inode->i_size - 1) >> inode->i_blkbits;
1711 if (!page_has_buffers(page)) {
1712 if (!PageUptodate(page))
1713 buffer_error();
1714 create_empty_buffers(page, 1 << inode->i_blkbits,
1715 (1 << BH_Dirty)|(1 << BH_Uptodate));
1719 * Be very careful. We have no exclusion from __set_page_dirty_buffers
1720 * here, and the (potentially unmapped) buffers may become dirty at
1721 * any time. If a buffer becomes dirty here after we've inspected it
1722 * then we just miss that fact, and the page stays dirty.
1724 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1725 * handle that here by just cleaning them.
1728 block = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1729 head = page_buffers(page);
1730 bh = head;
1733 * Get all the dirty buffers mapped to disk addresses and
1734 * handle any aliases from the underlying blockdev's mapping.
1736 do {
1737 if (block > last_block) {
1739 * mapped buffers outside i_size will occur, because
1740 * this page can be outside i_size when there is a
1741 * truncate in progress.
1743 * if (buffer_mapped(bh))
1744 * buffer_error();
1747 * The buffer was zeroed by block_write_full_page()
1749 clear_buffer_dirty(bh);
1750 set_buffer_uptodate(bh);
1751 } else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
1752 if (buffer_new(bh))
1753 buffer_error();
1754 err = get_block(inode, block, bh, 1);
1755 if (err)
1756 goto recover;
1757 if (buffer_new(bh)) {
1758 /* blockdev mappings never come here */
1759 clear_buffer_new(bh);
1760 unmap_underlying_metadata(bh->b_bdev,
1761 bh->b_blocknr);
1764 bh = bh->b_this_page;
1765 block++;
1766 } while (bh != head);
1768 do {
1769 get_bh(bh);
1770 if (buffer_mapped(bh) && buffer_dirty(bh)) {
1771 if (wbc->sync_mode != WB_SYNC_NONE) {
1772 lock_buffer(bh);
1773 } else {
1774 if (test_set_buffer_locked(bh)) {
1775 __set_page_dirty_nobuffers(page);
1776 continue;
1779 if (test_clear_buffer_dirty(bh)) {
1780 if (!buffer_uptodate(bh))
1781 buffer_error();
1782 mark_buffer_async_write(bh);
1783 } else {
1784 unlock_buffer(bh);
1787 } while ((bh = bh->b_this_page) != head);
1789 BUG_ON(PageWriteback(page));
1790 SetPageWriteback(page); /* Keeps try_to_free_buffers() away */
1791 unlock_page(page);
1794 * The page may come unlocked any time after the *first* submit_bh()
1795 * call. Be careful with its buffers.
1797 do {
1798 struct buffer_head *next = bh->b_this_page;
1799 if (buffer_async_write(bh)) {
1800 submit_bh(WRITE, bh);
1801 nr_underway++;
1803 put_bh(bh);
1804 bh = next;
1805 } while (bh != head);
1807 err = 0;
1808 done:
1809 if (nr_underway == 0) {
1811 * The page was marked dirty, but the buffers were
1812 * clean. Someone wrote them back by hand with
1813 * ll_rw_block/submit_bh. A rare case.
1815 int uptodate = 1;
1816 do {
1817 if (!buffer_uptodate(bh)) {
1818 uptodate = 0;
1819 break;
1821 bh = bh->b_this_page;
1822 } while (bh != head);
1823 if (uptodate)
1824 SetPageUptodate(page);
1825 end_page_writeback(page);
1827 return err;
1829 recover:
1831 * ENOSPC, or some other error. We may already have added some
1832 * blocks to the file, so we need to write these out to avoid
1833 * exposing stale data.
1834 * The page is currently locked and not marked for writeback
1836 bh = head;
1837 /* Recovery: lock and submit the mapped buffers */
1838 do {
1839 get_bh(bh);
1840 if (buffer_mapped(bh) && buffer_dirty(bh)) {
1841 lock_buffer(bh);
1842 mark_buffer_async_write(bh);
1843 } else {
1845 * The buffer may have been set dirty during
1846 * attachment to a dirty page.
1848 clear_buffer_dirty(bh);
1850 } while ((bh = bh->b_this_page) != head);
1851 SetPageError(page);
1852 BUG_ON(PageWriteback(page));
1853 SetPageWriteback(page);
1854 unlock_page(page);
1855 do {
1856 struct buffer_head *next = bh->b_this_page;
1857 if (buffer_async_write(bh)) {
1858 clear_buffer_dirty(bh);
1859 submit_bh(WRITE, bh);
1860 nr_underway++;
1862 put_bh(bh);
1863 bh = next;
1864 } while (bh != head);
1865 goto done;
1868 static int __block_prepare_write(struct inode *inode, struct page *page,
1869 unsigned from, unsigned to, get_block_t *get_block)
1871 unsigned block_start, block_end;
1872 sector_t block;
1873 int err = 0;
1874 unsigned blocksize, bbits;
1875 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1877 BUG_ON(!PageLocked(page));
1878 BUG_ON(from > PAGE_CACHE_SIZE);
1879 BUG_ON(to > PAGE_CACHE_SIZE);
1880 BUG_ON(from > to);
1882 blocksize = 1 << inode->i_blkbits;
1883 if (!page_has_buffers(page))
1884 create_empty_buffers(page, blocksize, 0);
1885 head = page_buffers(page);
1887 bbits = inode->i_blkbits;
1888 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1890 for(bh = head, block_start = 0; bh != head || !block_start;
1891 block++, block_start=block_end, bh = bh->b_this_page) {
1892 block_end = block_start + blocksize;
1893 if (block_end <= from || block_start >= to) {
1894 if (PageUptodate(page)) {
1895 if (!buffer_uptodate(bh))
1896 set_buffer_uptodate(bh);
1898 continue;
1900 if (buffer_new(bh))
1901 clear_buffer_new(bh);
1902 if (!buffer_mapped(bh)) {
1903 err = get_block(inode, block, bh, 1);
1904 if (err)
1905 goto out;
1906 if (buffer_new(bh)) {
1907 clear_buffer_new(bh);
1908 unmap_underlying_metadata(bh->b_bdev,
1909 bh->b_blocknr);
1910 if (PageUptodate(page)) {
1911 if (!buffer_mapped(bh))
1912 buffer_error();
1913 set_buffer_uptodate(bh);
1914 continue;
1916 if (block_end > to || block_start < from) {
1917 void *kaddr;
1919 kaddr = kmap_atomic(page, KM_USER0);
1920 if (block_end > to)
1921 memset(kaddr+to, 0,
1922 block_end-to);
1923 if (block_start < from)
1924 memset(kaddr+block_start,
1925 0, from-block_start);
1926 flush_dcache_page(page);
1927 kunmap_atomic(kaddr, KM_USER0);
1929 continue;
1932 if (PageUptodate(page)) {
1933 if (!buffer_uptodate(bh))
1934 set_buffer_uptodate(bh);
1935 continue;
1937 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1938 (block_start < from || block_end > to)) {
1939 ll_rw_block(READ, 1, &bh);
1940 *wait_bh++=bh;
1944 * If we issued read requests - let them complete.
1946 while(wait_bh > wait) {
1947 wait_on_buffer(*--wait_bh);
1948 if (!buffer_uptodate(*wait_bh))
1949 return -EIO;
1951 return 0;
1952 out:
1954 * Zero out any newly allocated blocks to avoid exposing stale
1955 * data. If BH_New is set, we know that the block was newly
1956 * allocated in the above loop.
1958 bh = head;
1959 block_start = 0;
1960 do {
1961 block_end = block_start+blocksize;
1962 if (block_end <= from)
1963 goto next_bh;
1964 if (block_start >= to)
1965 break;
1966 if (buffer_new(bh)) {
1967 void *kaddr;
1969 clear_buffer_new(bh);
1970 if (buffer_uptodate(bh))
1971 buffer_error();
1972 kaddr = kmap_atomic(page, KM_USER0);
1973 memset(kaddr+block_start, 0, bh->b_size);
1974 kunmap_atomic(kaddr, KM_USER0);
1975 set_buffer_uptodate(bh);
1976 mark_buffer_dirty(bh);
1978 next_bh:
1979 block_start = block_end;
1980 bh = bh->b_this_page;
1981 } while (bh != head);
1982 return err;
1985 static int __block_commit_write(struct inode *inode, struct page *page,
1986 unsigned from, unsigned to)
1988 unsigned block_start, block_end;
1989 int partial = 0;
1990 unsigned blocksize;
1991 struct buffer_head *bh, *head;
1993 blocksize = 1 << inode->i_blkbits;
1995 for(bh = head = page_buffers(page), block_start = 0;
1996 bh != head || !block_start;
1997 block_start=block_end, bh = bh->b_this_page) {
1998 block_end = block_start + blocksize;
1999 if (block_end <= from || block_start >= to) {
2000 if (!buffer_uptodate(bh))
2001 partial = 1;
2002 } else {
2003 set_buffer_uptodate(bh);
2004 mark_buffer_dirty(bh);
2009 * If this is a partial write which happened to make all buffers
2010 * uptodate then we can optimize away a bogus readpage() for
2011 * the next read(). Here we 'discover' whether the page went
2012 * uptodate as a result of this (potentially partial) write.
2014 if (!partial)
2015 SetPageUptodate(page);
2016 return 0;
2019 /*
2020 * Generic "read page" function for block devices that have the normal
2021 * get_block functionality. This covers most block device filesystems.
2022 * Reads the page asynchronously --- the unlock_buffer() and
2023 * set/clear_buffer_uptodate() functions propagate buffer state into the
2024 * page struct once IO has completed.
2025 */
2026 int block_read_full_page(struct page *page, get_block_t *get_block)
2027 {
2028 struct inode *inode = page->mapping->host;
2029 sector_t iblock, lblock;
2030 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2031 unsigned int blocksize;
2032 int nr, i;
2033 int fully_mapped = 1;
2035 if (!PageLocked(page))
2036 PAGE_BUG(page);
2037 if (PageUptodate(page))
2038 buffer_error();
2039 blocksize = 1 << inode->i_blkbits;
2040 if (!page_has_buffers(page))
2041 create_empty_buffers(page, blocksize, 0);
2042 head = page_buffers(page);
2044 iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2045 lblock = (inode->i_size+blocksize-1) >> inode->i_blkbits;
2046 bh = head;
2047 nr = 0;
2048 i = 0;
2050 do {
2051 if (buffer_uptodate(bh))
2052 continue;
2054 if (!buffer_mapped(bh)) {
2055 fully_mapped = 0;
2056 if (iblock < lblock) {
2057 if (get_block(inode, iblock, bh, 0))
2058 SetPageError(page);
2059 }
2060 if (!buffer_mapped(bh)) {
2061 void *kaddr = kmap_atomic(page, KM_USER0);
2062 memset(kaddr + i * blocksize, 0, blocksize);
2063 flush_dcache_page(page);
2064 kunmap_atomic(kaddr, KM_USER0);
2065 set_buffer_uptodate(bh);
2066 continue;
2067 }
2068 /*
2069 * get_block() might have updated the buffer
2070 * synchronously
2071 */
2072 if (buffer_uptodate(bh))
2073 continue;
2074 }
2075 arr[nr++] = bh;
2076 } while (i++, iblock++, (bh = bh->b_this_page) != head);
2078 if (fully_mapped)
2079 SetPageMappedToDisk(page);
2081 if (!nr) {
2082 /*
2083 * All buffers are uptodate - we can set the page uptodate
2084 * as well. But not if get_block() returned an error.
2085 */
2086 if (!PageError(page))
2087 SetPageUptodate(page);
2088 unlock_page(page);
2089 return 0;
2090 }
2092 /* Stage two: lock the buffers */
2093 for (i = 0; i < nr; i++) {
2094 bh = arr[i];
2095 lock_buffer(bh);
2096 mark_buffer_async_read(bh);
2097 }
2099 /*
2100 * Stage 3: start the IO. Check for uptodateness
2101 * inside the buffer lock in case another process reading
2102 * the underlying blockdev brought it uptodate (the sct fix).
2103 */
2104 for (i = 0; i < nr; i++) {
2105 bh = arr[i];
2106 if (buffer_uptodate(bh))
2107 end_buffer_async_read(bh, 1);
2108 else
2109 submit_bh(READ, bh);
2110 }
2111 return 0;
2112 }
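/*
 * [Editorial example, not part of the original file.]  A filesystem of this
 * era typically exposes block_read_full_page() as its ->readpage method with
 * a one-line wrapper.  "myfs_get_block" is a hypothetical get_block_t
 * implementation supplied by the filesystem; ext2 does the equivalent with
 * ext2_get_block().
 */
static int myfs_readpage(struct file *file, struct page *page)
{
	/* get_block is called with create == 0 on the read path */
	return block_read_full_page(page, myfs_get_block);
}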
2114 /* utility function for filesystems that need to do work on expanding
2115 * truncates. Uses prepare/commit_write to allow the filesystem to
2116 * deal with the hole.
2118 int generic_cont_expand(struct inode *inode, loff_t size)
2120 struct address_space *mapping = inode->i_mapping;
2121 struct page *page;
2122 unsigned long index, offset, limit;
2123 int err;
2125 err = -EFBIG;
2126 limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
2127 if (limit != RLIM_INFINITY && size > (loff_t)limit) {
2128 send_sig(SIGXFSZ, current, 0);
2129 goto out;
2131 if (size > inode->i_sb->s_maxbytes)
2132 goto out;
2134 offset = (size & (PAGE_CACHE_SIZE-1)); /* Within page */
2136 /* ugh. in prepare/commit_write, if from==to==start of block, we
2137 ** skip the prepare. make sure we never send an offset for the start
2138 ** of a block
2140 if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
2141 offset++;
2143 index = size >> PAGE_CACHE_SHIFT;
2144 err = -ENOMEM;
2145 page = grab_cache_page(mapping, index);
2146 if (!page)
2147 goto out;
2148 err = mapping->a_ops->prepare_write(NULL, page, offset, offset);
2149 if (!err) {
2150 err = mapping->a_ops->commit_write(NULL, page, offset, offset);
2152 unlock_page(page);
2153 page_cache_release(page);
2154 if (err > 0)
2155 err = 0;
2156 out:
2157 return err;
2158 }
2160 /*
2161 * For moronic filesystems that do not allow holes in files,
2162 * we may have to extend the file.
2163 */
2165 int cont_prepare_write(struct page *page, unsigned offset,
2166 unsigned to, get_block_t *get_block, loff_t *bytes)
2168 struct address_space *mapping = page->mapping;
2169 struct inode *inode = mapping->host;
2170 struct page *new_page;
2171 unsigned long pgpos;
2172 long status;
2173 unsigned zerofrom;
2174 unsigned blocksize = 1 << inode->i_blkbits;
2175 void *kaddr;
2177 while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) {
2178 status = -ENOMEM;
2179 new_page = grab_cache_page(mapping, pgpos);
2180 if (!new_page)
2181 goto out;
2182 /* we might sleep */
2183 if (*bytes>>PAGE_CACHE_SHIFT != pgpos) {
2184 unlock_page(new_page);
2185 page_cache_release(new_page);
2186 continue;
2188 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2189 if (zerofrom & (blocksize-1)) {
2190 *bytes |= (blocksize-1);
2191 (*bytes)++;
2193 status = __block_prepare_write(inode, new_page, zerofrom,
2194 PAGE_CACHE_SIZE, get_block);
2195 if (status)
2196 goto out_unmap;
2197 kaddr = kmap_atomic(new_page, KM_USER0);
2198 memset(kaddr+zerofrom, 0, PAGE_CACHE_SIZE-zerofrom);
2199 flush_dcache_page(new_page);
2200 kunmap_atomic(kaddr, KM_USER0);
2201 __block_commit_write(inode, new_page,
2202 zerofrom, PAGE_CACHE_SIZE);
2203 unlock_page(new_page);
2204 page_cache_release(new_page);
2207 if (page->index < pgpos) {
2208 /* completely inside the area */
2209 zerofrom = offset;
2210 } else {
2211 /* page covers the boundary, find the boundary offset */
2212 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2214 /* if we will expand the thing last block will be filled */
2215 if (to > zerofrom && (zerofrom & (blocksize-1))) {
2216 *bytes |= (blocksize-1);
2217 (*bytes)++;
2220 /* starting below the boundary? Nothing to zero out */
2221 if (offset <= zerofrom)
2222 zerofrom = offset;
2224 status = __block_prepare_write(inode, page, zerofrom, to, get_block);
2225 if (status)
2226 goto out1;
2227 if (zerofrom < offset) {
2228 kaddr = kmap_atomic(page, KM_USER0);
2229 memset(kaddr+zerofrom, 0, offset-zerofrom);
2230 flush_dcache_page(page);
2231 kunmap_atomic(kaddr, KM_USER0);
2232 __block_commit_write(inode, page, zerofrom, offset);
2234 return 0;
2235 out1:
2236 ClearPageUptodate(page);
2237 return status;
2239 out_unmap:
2240 ClearPageUptodate(new_page);
2241 unlock_page(new_page);
2242 page_cache_release(new_page);
2243 out:
2244 return status;
2245 }
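/*
 * [Editorial example, not part of the original file.]  A filesystem that
 * cannot represent holes (FAT is the classic case) uses cont_prepare_write()
 * as its ->prepare_write so the gap between the old end of data and the new
 * write gets allocated and zeroed.  "contfs_get_block", the CONTFS_I()
 * in-core inode helper and its loff_t "zeroed_up_to" field are all
 * assumptions of this sketch (FAT keeps a similar counter, mmu_private).
 */
static int contfs_prepare_write(struct file *file, struct page *page,
				unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;

	return cont_prepare_write(page, from, to, contfs_get_block,
				  &CONTFS_I(inode)->zeroed_up_to);
}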
2247 int block_prepare_write(struct page *page, unsigned from, unsigned to,
2248 get_block_t *get_block)
2250 struct inode *inode = page->mapping->host;
2251 int err = __block_prepare_write(inode, page, from, to, get_block);
2252 if (err)
2253 ClearPageUptodate(page);
2254 return err;
2257 int block_commit_write(struct page *page, unsigned from, unsigned to)
2259 struct inode *inode = page->mapping->host;
2260 __block_commit_write(inode,page,from,to);
2261 return 0;
2264 int generic_commit_write(struct file *file, struct page *page,
2265 unsigned from, unsigned to)
2267 struct inode *inode = page->mapping->host;
2268 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2269 __block_commit_write(inode,page,from,to);
2270 if (pos > inode->i_size) {
2271 inode->i_size = pos;
2272 mark_inode_dirty(inode);
2273 }
2274 return 0;
2275 }
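/*
 * [Editorial example, not part of the original file.]  Putting the pieces
 * together: a minimal buffer-backed address_space_operations built from the
 * generic helpers in this file, assuming a hypothetical myfs_get_block()
 * plus the myfs_readpage() sketch shown earlier.  Compare ext2's aops.
 */
static int myfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, myfs_get_block, wbc);
}

static int myfs_prepare_write(struct file *file, struct page *page,
				unsigned from, unsigned to)
{
	return block_prepare_write(page, from, to, myfs_get_block);
}

static struct address_space_operations myfs_aops = {
	.readpage	= myfs_readpage,
	.writepage	= myfs_writepage,
	.sync_page	= block_sync_page,
	.prepare_write	= myfs_prepare_write,
	.commit_write	= generic_commit_write,
};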
2277 /*
2278 * On entry, the page is fully not uptodate.
2279 * On exit the page is fully uptodate in the areas outside (from,to)
2280 */
2281 int nobh_prepare_write(struct page *page, unsigned from, unsigned to,
2282 get_block_t *get_block)
2284 struct inode *inode = page->mapping->host;
2285 const unsigned blkbits = inode->i_blkbits;
2286 const unsigned blocksize = 1 << blkbits;
2287 struct buffer_head map_bh;
2288 struct buffer_head *read_bh[MAX_BUF_PER_PAGE];
2289 unsigned block_in_page;
2290 unsigned block_start;
2291 sector_t block_in_file;
2292 char *kaddr;
2293 int nr_reads = 0;
2294 int i;
2295 int ret = 0;
2296 int is_mapped_to_disk = 1;
2297 int dirtied_it = 0;
2299 if (PageMappedToDisk(page))
2300 return 0;
2302 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2303 map_bh.b_page = page;
2306 * We loop across all blocks in the page, whether or not they are
2307 * part of the affected region. This is so we can discover if the
2308 * page is fully mapped-to-disk.
2310 for (block_start = 0, block_in_page = 0;
2311 block_start < PAGE_CACHE_SIZE;
2312 block_in_page++, block_start += blocksize) {
2313 unsigned block_end = block_start + blocksize;
2314 int create;
2316 map_bh.b_state = 0;
2317 create = 1;
2318 if (block_start >= to)
2319 create = 0;
2320 ret = get_block(inode, block_in_file + block_in_page,
2321 &map_bh, create);
2322 if (ret)
2323 goto failed;
2324 if (!buffer_mapped(&map_bh))
2325 is_mapped_to_disk = 0;
2326 if (buffer_new(&map_bh))
2327 unmap_underlying_metadata(map_bh.b_bdev,
2328 map_bh.b_blocknr);
2329 if (PageUptodate(page))
2330 continue;
2331 if (buffer_new(&map_bh) || !buffer_mapped(&map_bh)) {
2332 kaddr = kmap_atomic(page, KM_USER0);
2333 if (block_start < from) {
2334 memset(kaddr+block_start, 0, from-block_start);
2335 dirtied_it = 1;
2337 if (block_end > to) {
2338 memset(kaddr + to, 0, block_end - to);
2339 dirtied_it = 1;
2341 flush_dcache_page(page);
2342 kunmap_atomic(kaddr, KM_USER0);
2343 continue;
2345 if (buffer_uptodate(&map_bh))
2346 continue; /* reiserfs does this */
2347 if (block_start < from || block_end > to) {
2348 struct buffer_head *bh = alloc_buffer_head(GFP_NOFS);
2350 if (!bh) {
2351 ret = -ENOMEM;
2352 goto failed;
2354 bh->b_state = map_bh.b_state;
2355 atomic_set(&bh->b_count, 0);
2356 bh->b_this_page = 0;
2357 bh->b_page = page;
2358 bh->b_blocknr = map_bh.b_blocknr;
2359 bh->b_size = blocksize;
2360 bh->b_data = (char *)(long)block_start;
2361 bh->b_bdev = map_bh.b_bdev;
2362 bh->b_private = NULL;
2363 read_bh[nr_reads++] = bh;
2367 if (nr_reads) {
2368 ll_rw_block(READ, nr_reads, read_bh);
2369 for (i = 0; i < nr_reads; i++) {
2370 wait_on_buffer(read_bh[i]);
2371 if (!buffer_uptodate(read_bh[i]))
2372 ret = -EIO;
2373 free_buffer_head(read_bh[i]);
2374 read_bh[i] = NULL;
2376 if (ret)
2377 goto failed;
2380 if (is_mapped_to_disk)
2381 SetPageMappedToDisk(page);
2382 SetPageUptodate(page);
2385 * Setting the page dirty here isn't necessary for the prepare_write
2386 * function - commit_write will do that. But if/when this function is
2387 * used within the pagefault handler to ensure that all mmapped pages
2388 * have backing space in the filesystem, we will need to dirty the page
2389 * if its contents were altered.
2391 if (dirtied_it)
2392 set_page_dirty(page);
2394 return 0;
2396 failed:
2397 for (i = 0; i < nr_reads; i++) {
2398 if (read_bh[i])
2399 free_buffer_head(read_bh[i]);
2403 * Error recovery is pretty slack. Clear the page and mark it dirty
2404 * so we'll later zero out any blocks which _were_ allocated.
2406 kaddr = kmap_atomic(page, KM_USER0);
2407 memset(kaddr, 0, PAGE_CACHE_SIZE);
2408 kunmap_atomic(kaddr, KM_USER0);
2409 SetPageUptodate(page);
2410 set_page_dirty(page);
2411 return ret;
2413 EXPORT_SYMBOL(nobh_prepare_write);
2415 int nobh_commit_write(struct file *file, struct page *page,
2416 unsigned from, unsigned to)
2418 struct inode *inode = page->mapping->host;
2419 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2421 set_page_dirty(page);
2422 if (pos > inode->i_size) {
2423 inode->i_size = pos;
2424 mark_inode_dirty(inode);
2425 }
2426 return 0;
2427 }
2428 EXPORT_SYMBOL(nobh_commit_write);
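/*
 * [Editorial example, not part of the original file.]  A mapping that wants
 * buffered writes without permanently attaching buffer_heads to its pages
 * can pair the nobh helpers roughly like this; "nobhfs_get_block" is a
 * hypothetical get_block_t.
 */
static int nobhfs_prepare_write(struct file *file, struct page *page,
				unsigned from, unsigned to)
{
	return nobh_prepare_write(page, from, to, nobhfs_get_block);
}

/* ->commit_write can then point at nobh_commit_write() directly. */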
2431 * This function assumes that ->prepare_write() uses nobh_prepare_write().
2433 int nobh_truncate_page(struct address_space *mapping, loff_t from)
2435 struct inode *inode = mapping->host;
2436 unsigned blocksize = 1 << inode->i_blkbits;
2437 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2438 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2439 unsigned to;
2440 struct page *page;
2441 struct address_space_operations *a_ops = mapping->a_ops;
2442 char *kaddr;
2443 int ret = 0;
2445 if ((offset & (blocksize - 1)) == 0)
2446 goto out;
2448 ret = -ENOMEM;
2449 page = grab_cache_page(mapping, index);
2450 if (!page)
2451 goto out;
2453 to = (offset + blocksize) & ~(blocksize - 1);
2454 ret = a_ops->prepare_write(NULL, page, offset, to);
2455 if (ret == 0) {
2456 kaddr = kmap_atomic(page, KM_USER0);
2457 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2458 flush_dcache_page(page);
2459 kunmap_atomic(kaddr, KM_USER0);
2460 set_page_dirty(page);
2462 unlock_page(page);
2463 page_cache_release(page);
2464 out:
2465 return ret;
2467 EXPORT_SYMBOL(nobh_truncate_page);
2469 int block_truncate_page(struct address_space *mapping,
2470 loff_t from, get_block_t *get_block)
2472 unsigned long index = from >> PAGE_CACHE_SHIFT;
2473 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2474 unsigned blocksize, iblock, length, pos;
2475 struct inode *inode = mapping->host;
2476 struct page *page;
2477 struct buffer_head *bh;
2478 void *kaddr;
2479 int err;
2481 blocksize = 1 << inode->i_blkbits;
2482 length = offset & (blocksize - 1);
2484 /* Block boundary? Nothing to do */
2485 if (!length)
2486 return 0;
2488 length = blocksize - length;
2489 iblock = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2491 page = grab_cache_page(mapping, index);
2492 err = -ENOMEM;
2493 if (!page)
2494 goto out;
2496 if (!page_has_buffers(page))
2497 create_empty_buffers(page, blocksize, 0);
2499 /* Find the buffer that contains "offset" */
2500 bh = page_buffers(page);
2501 pos = blocksize;
2502 while (offset >= pos) {
2503 bh = bh->b_this_page;
2504 iblock++;
2505 pos += blocksize;
2508 err = 0;
2509 if (!buffer_mapped(bh)) {
2510 err = get_block(inode, iblock, bh, 0);
2511 if (err)
2512 goto unlock;
2513 /* unmapped? It's a hole - nothing to do */
2514 if (!buffer_mapped(bh))
2515 goto unlock;
2518 /* Ok, it's mapped. Make sure it's up-to-date */
2519 if (PageUptodate(page))
2520 set_buffer_uptodate(bh);
2522 if (!buffer_uptodate(bh) && !buffer_delay(bh)) {
2523 err = -EIO;
2524 ll_rw_block(READ, 1, &bh);
2525 wait_on_buffer(bh);
2526 /* Uhhuh. Read error. Complain and punt. */
2527 if (!buffer_uptodate(bh))
2528 goto unlock;
2531 kaddr = kmap_atomic(page, KM_USER0);
2532 memset(kaddr + offset, 0, length);
2533 flush_dcache_page(page);
2534 kunmap_atomic(kaddr, KM_USER0);
2536 mark_buffer_dirty(bh);
2537 err = 0;
2539 unlock:
2540 unlock_page(page);
2541 page_cache_release(page);
2542 out:
2543 return err;
2544 }
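/*
 * [Editorial example, not part of the original file.]  A filesystem's
 * truncate path normally calls block_truncate_page() first, so the tail of
 * the last remaining block is zeroed before the blocks beyond the new
 * i_size are freed; ext2_truncate() follows this pattern.  "myfs_get_block"
 * and the block-freeing step are assumptions of this sketch.
 */
static void myfs_truncate(struct inode *inode)
{
	block_truncate_page(inode->i_mapping, inode->i_size, myfs_get_block);
	/* ... now release the on-disk blocks past inode->i_size ... */
}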
2546 /*
2547 * The generic ->writepage function for buffer-backed address_spaces
2548 */
2549 int block_write_full_page(struct page *page, get_block_t *get_block,
2550 struct writeback_control *wbc)
2552 struct inode * const inode = page->mapping->host;
2553 const unsigned long end_index = inode->i_size >> PAGE_CACHE_SHIFT;
2554 unsigned offset;
2555 void *kaddr;
2557 /* Is the page fully inside i_size? */
2558 if (page->index < end_index)
2559 return __block_write_full_page(inode, page, get_block, wbc);
2561 /* Is the page fully outside i_size? (truncate in progress) */
2562 offset = inode->i_size & (PAGE_CACHE_SIZE-1);
2563 if (page->index >= end_index+1 || !offset) {
2565 * The page may have dirty, unmapped buffers. For example,
2566 * they may have been added in ext3_writepage(). Make them
2567 * freeable here, so the page does not leak.
2569 block_invalidatepage(page, 0);
2570 unlock_page(page);
2571 return -EIO;
2575 * The page straddles i_size. It must be zeroed out on each and every
2576 * writepage invocation because it may be mmapped. "A file is mapped
2577 * in multiples of the page size. For a file that is not a multiple of
2578 * the page size, the remaining memory is zeroed when mapped, and
2579 * writes to that region are not written out to the file."
2581 kaddr = kmap_atomic(page, KM_USER0);
2582 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2583 flush_dcache_page(page);
2584 kunmap_atomic(kaddr, KM_USER0);
2585 return __block_write_full_page(inode, page, get_block, wbc);
2588 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2589 get_block_t *get_block)
2591 struct buffer_head tmp;
2592 struct inode *inode = mapping->host;
2593 tmp.b_state = 0;
2594 tmp.b_blocknr = 0;
2595 get_block(inode, block, &tmp, 0);
2596 return tmp.b_blocknr;
2597 }
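/*
 * [Editorial example, not part of the original file.]  ->bmap (used by the
 * FIBMAP ioctl and by swapon on regular files) is normally a thin wrapper
 * around generic_block_bmap(); "myfs_get_block" is hypothetical.
 */
static sector_t myfs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, myfs_get_block);
}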
2599 static int end_bio_bh_io_sync(struct bio *bio, unsigned int bytes_done, int err)
2600 {
2601 struct buffer_head *bh = bio->bi_private;
2603 if (bio->bi_size)
2604 return 1;
2606 bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2607 bio_put(bio);
2608 return 0;
2609 }
2611 int submit_bh(int rw, struct buffer_head * bh)
2612 {
2613 struct bio *bio;
2615 BUG_ON(!buffer_locked(bh));
2616 BUG_ON(!buffer_mapped(bh));
2617 BUG_ON(!bh->b_end_io);
2619 if ((rw == READ || rw == READA) && buffer_uptodate(bh))
2620 buffer_error();
2621 if (rw == WRITE && !buffer_uptodate(bh))
2622 buffer_error();
2623 if (rw == READ && buffer_dirty(bh))
2624 buffer_error();
2626 set_buffer_req(bh);
2629 * from here on down, it's all bio -- do the initial mapping,
2630 * submit_bio -> generic_make_request may further map this bio around
2632 bio = bio_alloc(GFP_NOIO, 1);
2634 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2635 bio->bi_bdev = bh->b_bdev;
2636 bio->bi_io_vec[0].bv_page = bh->b_page;
2637 bio->bi_io_vec[0].bv_len = bh->b_size;
2638 bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2640 bio->bi_vcnt = 1;
2641 bio->bi_idx = 0;
2642 bio->bi_size = bh->b_size;
2644 bio->bi_end_io = end_bio_bh_io_sync;
2645 bio->bi_private = bh;
2647 return submit_bio(rw, bio);
2648 }
2650 /**
2651 * ll_rw_block: low-level access to block devices (DEPRECATED)
2652 * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
2653 * @nr: number of &struct buffer_heads in the array
2654 * @bhs: array of pointers to &struct buffer_head
2655 *
2656 * ll_rw_block() takes an array of pointers to &struct buffer_heads,
2657 * and requests an I/O operation on them, either a %READ or a %WRITE.
2658 * The third %READA option is described in the documentation for
2659 * generic_make_request() which ll_rw_block() calls.
2660 *
2661 * This function drops any buffer that it cannot get a lock on (with the
2662 * BH_Lock state bit), any buffer that appears to be clean when doing a
2663 * write request, and any buffer that appears to be up-to-date when doing
2664 * a read request. Further it marks as clean buffers that are processed for
2665 * writing (the buffer cache won't assume that they are actually clean until
2666 * the buffer gets unlocked).
2667 *
2668 * ll_rw_block sets b_end_io to a simple completion handler that marks
2669 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
2670 * any waiters.
2671 *
2672 * All of the buffers must be for the same device, and must also be a
2673 * multiple of the current approved size for the device.
2674 */
2675 void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2677 int i;
2679 for (i = 0; i < nr; i++) {
2680 struct buffer_head *bh = bhs[i];
2682 if (test_set_buffer_locked(bh))
2683 continue;
2685 get_bh(bh);
2686 bh->b_end_io = end_buffer_io_sync;
2687 if (rw == WRITE) {
2688 if (test_clear_buffer_dirty(bh)) {
2689 submit_bh(WRITE, bh);
2690 continue;
2692 } else {
2693 if (!buffer_uptodate(bh)) {
2694 submit_bh(rw, bh);
2695 continue;
2698 unlock_buffer(bh);
2699 put_bh(bh);
2700 }
2701 }
2703 /*
2704 * For a data-integrity writeout, we need to wait upon any in-progress I/O
2705 * and then start new I/O and then wait upon it.
2706 */
2707 void sync_dirty_buffer(struct buffer_head *bh)
2708 {
2709 WARN_ON(atomic_read(&bh->b_count) < 1);
2710 lock_buffer(bh);
2711 if (test_clear_buffer_dirty(bh)) {
2712 get_bh(bh);
2713 bh->b_end_io = end_buffer_io_sync;
2714 submit_bh(WRITE, bh);
2715 wait_on_buffer(bh);
2716 } else {
2717 unlock_buffer(bh);
2718 }
2719 }
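/*
 * [Editorial example, not part of the original file.]  A typical
 * data-integrity update of a metadata block built on the helpers above:
 * read it, modify it, mark it dirty, then sync_dirty_buffer() to write and
 * wait.  sb_bread()/brelse() come from buffer_head.h; the superblock and
 * block number are assumptions of this sketch.
 */
static int myfs_write_super_block(struct super_block *sb, sector_t blocknr)
{
	struct buffer_head *bh;
	int err;

	bh = sb_bread(sb, blocknr);		/* read (or find) the block */
	if (!bh)
		return -EIO;
	/* ... update the on-disk structure in bh->b_data ... */
	mark_buffer_dirty(bh);			/* queue it for writeback */
	sync_dirty_buffer(bh);			/* write it and wait */
	err = buffer_uptodate(bh) ? 0 : -EIO;	/* write error clears uptodate */
	brelse(bh);
	return err;
}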
2721 /*
2722 * Sanity checks for try_to_free_buffers.
2723 */
2724 static void check_ttfb_buffer(struct page *page, struct buffer_head *bh)
2726 if (!buffer_uptodate(bh) && !buffer_req(bh)) {
2727 if (PageUptodate(page) && page->mapping
2728 && buffer_mapped(bh) /* discard_buffer */
2729 && S_ISBLK(page->mapping->host->i_mode))
2731 buffer_error();
2737 * try_to_free_buffers() checks if all the buffers on this particular page
2738 * are unused, and releases them if so.
2740 * Exclusion against try_to_free_buffers may be obtained by either
2741 * locking the page or by holding its mapping's private_lock.
2743 * If the page is dirty but all the buffers are clean then we need to
2744 * be sure to mark the page clean as well. This is because the page
2745 * may be against a block device, and a later reattachment of buffers
2746 * to a dirty page will set *all* buffers dirty. Which would corrupt
2747 * filesystem data on the same device.
2749 * The same applies to regular filesystem pages: if all the buffers are
2750 * clean then we set the page clean and proceed. To do that, we require
2751 * total exclusion from __set_page_dirty_buffers(). That is obtained with
2752 * private_lock.
2754 * try_to_free_buffers() is non-blocking.
2756 static inline int buffer_busy(struct buffer_head *bh)
2758 return atomic_read(&bh->b_count) |
2759 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
2762 static int
2763 drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
2765 struct buffer_head *head = page_buffers(page);
2766 struct buffer_head *bh;
2767 int was_uptodate = 1;
2769 bh = head;
2770 do {
2771 check_ttfb_buffer(page, bh);
2772 if (buffer_busy(bh))
2773 goto failed;
2774 if (!buffer_uptodate(bh) && !buffer_req(bh))
2775 was_uptodate = 0;
2776 bh = bh->b_this_page;
2777 } while (bh != head);
2779 if (!was_uptodate && PageUptodate(page))
2780 buffer_error();
2782 do {
2783 struct buffer_head *next = bh->b_this_page;
2785 if (!list_empty(&bh->b_assoc_buffers))
2786 __remove_assoc_queue(bh);
2787 bh = next;
2788 } while (bh != head);
2789 *buffers_to_free = head;
2790 __clear_page_buffers(page);
2791 return 1;
2792 failed:
2793 return 0;
2796 int try_to_free_buffers(struct page *page)
2798 struct address_space * const mapping = page->mapping;
2799 struct buffer_head *buffers_to_free = NULL;
2800 int ret = 0;
2802 BUG_ON(!PageLocked(page));
2803 if (PageWriteback(page))
2804 return 0;
2806 if (mapping == NULL) { /* swapped-in anon page */
2807 ret = drop_buffers(page, &buffers_to_free);
2808 goto out;
2811 spin_lock(&mapping->private_lock);
2812 ret = drop_buffers(page, &buffers_to_free);
2813 if (ret && !PageSwapCache(page)) {
2815 * If the filesystem writes its buffers by hand (eg ext3)
2816 * then we can have clean buffers against a dirty page. We
2817 * clean the page here; otherwise later reattachment of buffers
2818 * could encounter a non-uptodate page, which is unresolvable.
2819 * This only applies in the rare case where try_to_free_buffers
2820 * succeeds but the page is not freed.
2822 clear_page_dirty(page);
2824 spin_unlock(&mapping->private_lock);
2825 out:
2826 if (buffers_to_free) {
2827 struct buffer_head *bh = buffers_to_free;
2829 do {
2830 struct buffer_head *next = bh->b_this_page;
2831 free_buffer_head(bh);
2832 bh = next;
2833 } while (bh != buffers_to_free);
2834 }
2835 return ret;
2836 }
2837 EXPORT_SYMBOL(try_to_free_buffers);
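/*
 * [Editorial example, not part of the original file.]  For a filesystem with
 * no private per-page state, a ->releasepage method boils down to
 * try_to_free_buffers(); journalling filesystems such as ext3 do extra
 * bookkeeping first.  The "myfs" name is hypothetical.
 */
static int myfs_releasepage(struct page *page, int gfp_mask)
{
	return try_to_free_buffers(page);
}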
2839 int block_sync_page(struct page *page)
2841 blk_run_queues();
2842 return 0;
2846 * There are no bdflush tunables left. But distributions are
2847 * still running obsolete flush daemons, so we terminate them here.
2849 * Use of bdflush() is deprecated and will be removed in a future kernel.
2850 * The `pdflush' kernel threads fully replace bdflush daemons and this call.
2852 asmlinkage long sys_bdflush(int func, long data)
2854 static int msg_count;
2856 if (!capable(CAP_SYS_ADMIN))
2857 return -EPERM;
2859 if (msg_count < 5) {
2860 msg_count++;
2861 printk(KERN_INFO
2862 "warning: process `%s' used the obsolete bdflush"
2863 " system call\n", current->comm);
2864 printk(KERN_INFO "Fix your initscripts?\n");
2867 if (func == 1)
2868 do_exit(0);
2869 return 0;
2873 * Buffer-head allocation
2875 static kmem_cache_t *bh_cachep;
2878 * Once the number of bh's in the machine exceeds this level, we start
2879 * stripping them in writeback.
2881 static int max_buffer_heads;
2883 int buffer_heads_over_limit;
2885 struct bh_accounting {
2886 int nr; /* Number of live bh's */
2887 int ratelimit; /* Limit cacheline bouncing */
2890 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
2892 static void recalc_bh_state(void)
2894 int i;
2895 int tot = 0;
2897 if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
2898 return;
2899 __get_cpu_var(bh_accounting).ratelimit = 0;
2900 for (i = 0; i < NR_CPUS; i++) {
2901 if (cpu_online(i))
2902 tot += per_cpu(bh_accounting, i).nr;
2904 buffer_heads_over_limit = (tot > max_buffer_heads);
2907 struct buffer_head *alloc_buffer_head(int gfp_flags)
2909 struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
2910 if (ret) {
2911 preempt_disable();
2912 __get_cpu_var(bh_accounting).nr++;
2913 recalc_bh_state();
2914 preempt_enable();
2916 return ret;
2918 EXPORT_SYMBOL(alloc_buffer_head);
2920 void free_buffer_head(struct buffer_head *bh)
2922 BUG_ON(!list_empty(&bh->b_assoc_buffers));
2923 kmem_cache_free(bh_cachep, bh);
2924 preempt_disable();
2925 __get_cpu_var(bh_accounting).nr--;
2926 recalc_bh_state();
2927 preempt_enable();
2929 EXPORT_SYMBOL(free_buffer_head);
2931 static void
2932 init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long flags)
2934 if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
2935 SLAB_CTOR_CONSTRUCTOR) {
2936 struct buffer_head * bh = (struct buffer_head *)data;
2938 memset(bh, 0, sizeof(*bh));
2939 INIT_LIST_HEAD(&bh->b_assoc_buffers);
2943 static void buffer_init_cpu(int cpu)
2945 struct bh_accounting *bha = &per_cpu(bh_accounting, cpu);
2946 struct bh_lru *bhl = &per_cpu(bh_lrus, cpu);
2948 bha->nr = 0;
2949 bha->ratelimit = 0;
2950 memset(bhl, 0, sizeof(*bhl));
2953 static int __devinit buffer_cpu_notify(struct notifier_block *self,
2954 unsigned long action, void *hcpu)
2956 long cpu = (long)hcpu;
2957 switch(action) {
2958 case CPU_UP_PREPARE:
2959 buffer_init_cpu(cpu);
2960 break;
2961 default:
2962 break;
2964 return NOTIFY_OK;
2967 static struct notifier_block __devinitdata buffer_nb = {
2968 .notifier_call = buffer_cpu_notify,
2971 void __init buffer_init(void)
2973 int i;
2974 int nrpages;
2976 bh_cachep = kmem_cache_create("buffer_head",
2977 sizeof(struct buffer_head), 0,
2978 0, init_buffer_head, NULL);
2979 for (i = 0; i < ARRAY_SIZE(bh_wait_queue_heads); i++)
2980 init_waitqueue_head(&bh_wait_queue_heads[i].wqh);
2983 * Limit the bh occupancy to 10% of ZONE_NORMAL
2985 nrpages = (nr_free_buffer_pages() * 10) / 100;
2986 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
2987 buffer_cpu_notify(&buffer_nb, (unsigned long)CPU_UP_PREPARE,
2988 (void *)(long)smp_processor_id());
2989 register_cpu_notifier(&buffer_nb);