[linux-2.6/history.git] / fs / buffer.c
blob cb8c281fa0be58bfb57a191b6b54673a68114f8f
1 /*
2 * linux/fs/buffer.c
4 * Copyright (C) 1991, 1992, 2002 Linus Torvalds
5 */
7 /*
8 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
10 * Removed a lot of unnecessary code and simplified things now that
11 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
13 * Speed up hash, lru, and free list operations. Use gfp() for allocating
14 * hash table, use SLAB cache for buffer heads. SMP threading. -DaveM
16 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
18 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
21 #include <linux/config.h>
22 #include <linux/kernel.h>
23 #include <linux/fs.h>
24 #include <linux/mm.h>
25 #include <linux/percpu.h>
26 #include <linux/slab.h>
27 #include <linux/smp_lock.h>
28 #include <linux/blkdev.h>
29 #include <linux/file.h>
30 #include <linux/quotaops.h>
31 #include <linux/highmem.h>
32 #include <linux/module.h>
33 #include <linux/writeback.h>
34 #include <linux/hash.h>
35 #include <linux/suspend.h>
36 #include <linux/buffer_head.h>
37 #include <linux/bio.h>
38 #include <linux/notifier.h>
39 #include <linux/cpu.h>
40 #include <asm/bitops.h>
42 static void invalidate_bh_lrus(void);
44 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
47 * Hashed waitqueue_heads for wait_on_buffer()
49 #define BH_WAIT_TABLE_ORDER 7
50 static struct bh_wait_queue_head {
51 wait_queue_head_t wqh;
52 } ____cacheline_aligned_in_smp bh_wait_queue_heads[1<<BH_WAIT_TABLE_ORDER];
55 * Debug/devel support stuff
58 void __buffer_error(char *file, int line)
60 static int enough;
62 if (enough > 10)
63 return;
64 enough++;
65 printk("buffer layer error at %s:%d\n", file, line);
66 #ifndef CONFIG_KALLSYMS
67 printk("Pass this trace through ksymoops for reporting\n");
68 #endif
69 dump_stack();
71 EXPORT_SYMBOL(__buffer_error);
73 inline void
74 init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
76 bh->b_end_io = handler;
77 bh->b_private = private;
81 * Return the address of the waitqueue_head to be used for this
82 * buffer_head
84 wait_queue_head_t *bh_waitq_head(struct buffer_head *bh)
86 return &bh_wait_queue_heads[hash_ptr(bh, BH_WAIT_TABLE_ORDER)].wqh;
88 EXPORT_SYMBOL(bh_waitq_head);
90 void wake_up_buffer(struct buffer_head *bh)
92 wait_queue_head_t *wq = bh_waitq_head(bh);
94 smp_mb();
95 if (waitqueue_active(wq))
96 wake_up_all(wq);
98 EXPORT_SYMBOL(wake_up_buffer);
100 void unlock_buffer(struct buffer_head *bh)
103 * unlock_buffer against a zero-count bh is a bug, if the page
104 * is not locked. Because then nothing protects the buffer's
105 * waitqueue, which is used here. (Well. Other locked buffers
106 * against the page will pin it. But complain anyway).
108 if (atomic_read(&bh->b_count) == 0 &&
109 !PageLocked(bh->b_page) &&
110 !PageWriteback(bh->b_page))
111 buffer_error();
113 clear_buffer_locked(bh);
114 smp_mb__after_clear_bit();
115 wake_up_buffer(bh);
119 * Block until a buffer comes unlocked. This doesn't stop it
120 * from becoming locked again - you have to lock it yourself
121 * if you want to preserve its state.
123 void __wait_on_buffer(struct buffer_head * bh)
125 wait_queue_head_t *wqh = bh_waitq_head(bh);
126 DEFINE_WAIT(wait);
128 if (atomic_read(&bh->b_count) == 0 &&
129 (!bh->b_page || !PageLocked(bh->b_page)))
130 buffer_error();
132 do {
133 prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
134 if (buffer_locked(bh)) {
135 blk_run_queues();
136 io_schedule();
138 } while (buffer_locked(bh));
139 finish_wait(wqh, &wait);
142 static void
143 __set_page_buffers(struct page *page, struct buffer_head *head)
145 if (page_has_buffers(page))
146 buffer_error();
147 page_cache_get(page);
148 SetPagePrivate(page);
149 page->private = (unsigned long)head;
152 static void
153 __clear_page_buffers(struct page *page)
155 ClearPagePrivate(page);
156 page->private = 0;
157 page_cache_release(page);
160 static void buffer_io_error(struct buffer_head *bh)
162 char b[BDEVNAME_SIZE];
164 printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
165 bdevname(bh->b_bdev, b),
166 (unsigned long long)bh->b_blocknr);
170 * Default synchronous end-of-IO handler.. Just mark it up-to-date and
171 * unlock the buffer. This is what ll_rw_block uses too.
173 void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
175 if (uptodate) {
176 set_buffer_uptodate(bh);
177 } else {
178 /* This happens, due to failed READA attempts. */
179 clear_buffer_uptodate(bh);
181 unlock_buffer(bh);
182 put_bh(bh);
185 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
187 char b[BDEVNAME_SIZE];
189 if (uptodate) {
190 set_buffer_uptodate(bh);
191 } else {
192 if (printk_ratelimit()) {
193 buffer_io_error(bh);
194 printk(KERN_WARNING "lost page write due to "
195 "I/O error on %s\n",
196 bdevname(bh->b_bdev, b));
198 set_buffer_write_io_error(bh);
199 clear_buffer_uptodate(bh);
201 unlock_buffer(bh);
202 put_bh(bh);
206 * Write out and wait upon all the dirty data associated with a block
207 * device via its mapping. Does not take the superblock lock.
209 int sync_blockdev(struct block_device *bdev)
211 int ret = 0;
213 if (bdev) {
214 int err;
216 ret = filemap_fdatawrite(bdev->bd_inode->i_mapping);
217 err = filemap_fdatawait(bdev->bd_inode->i_mapping);
218 if (!ret)
219 ret = err;
221 return ret;
223 EXPORT_SYMBOL(sync_blockdev);
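/*
 * Illustrative sketch: a caller that wants the blockdev pagecache flushed
 * before touching the device directly can simply use the helper above.
 * The myfs_flush_dev() wrapper is hypothetical.
 */
static int myfs_flush_dev(struct block_device *bdev)
{
	/* write out and wait upon all dirty pages of bdev's mapping */
	return sync_blockdev(bdev);
}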
226 * Write out and wait upon all dirty data associated with this
227 * superblock. Filesystem data as well as the underlying block
228 * device. Takes the superblock lock.
230 int fsync_super(struct super_block *sb)
232 sync_inodes_sb(sb, 0);
233 DQUOT_SYNC(sb);
234 lock_super(sb);
235 if (sb->s_dirt && sb->s_op->write_super)
236 sb->s_op->write_super(sb);
237 unlock_super(sb);
238 if (sb->s_op->sync_fs)
239 sb->s_op->sync_fs(sb, 1);
240 sync_blockdev(sb->s_bdev);
241 sync_inodes_sb(sb, 1);
243 return sync_blockdev(sb->s_bdev);
247 * Write out and wait upon all dirty data associated with this
248 * device. Filesystem data as well as the underlying block
249 * device. Takes the superblock lock.
251 int fsync_bdev(struct block_device *bdev)
253 struct super_block *sb = get_super(bdev);
254 if (sb) {
255 int res = fsync_super(sb);
256 drop_super(sb);
257 return res;
259 return sync_blockdev(bdev);
263 * sync everything. Start out by waking pdflush, because that writes back
264 * all queues in parallel.
266 static void do_sync(unsigned long wait)
268 wakeup_bdflush(0);
269 sync_inodes(0); /* All mappings, inodes and their blockdevs */
270 DQUOT_SYNC(NULL);
271 sync_supers(); /* Write the superblocks */
272 sync_filesystems(0); /* Start syncing the filesystems */
273 sync_filesystems(wait); /* Waitingly sync the filesystems */
274 sync_inodes(wait); /* Mappings, inodes and blockdevs, again. */
275 if (!wait)
276 printk("Emergency Sync complete\n");
279 asmlinkage long sys_sync(void)
281 do_sync(1);
282 return 0;
285 void emergency_sync(void)
287 pdflush_operation(do_sync, 0);
291 * Generic function to fsync a file.
293 * filp may be NULL if called via the msync of a vma.
296 int file_fsync(struct file *filp, struct dentry *dentry, int datasync)
298 struct inode * inode = dentry->d_inode;
299 struct super_block * sb;
300 int ret;
302 /* sync the inode to buffers */
303 write_inode_now(inode, 0);
305 /* sync the superblock to buffers */
306 sb = inode->i_sb;
307 lock_super(sb);
308 if (sb->s_op->write_super)
309 sb->s_op->write_super(sb);
310 unlock_super(sb);
312 /* .. finally sync the buffers to disk */
313 ret = sync_blockdev(sb->s_bdev);
314 return ret;
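/*
 * Illustrative sketch: a simple filesystem can plug file_fsync() straight
 * into its file_operations.  The myfs_file_operations name is hypothetical;
 * the generic_file_* helpers are the usual companions.
 */
static struct file_operations myfs_file_operations = {
	.read	= generic_file_read,
	.write	= generic_file_write,
	.mmap	= generic_file_mmap,
	.fsync	= file_fsync,	/* inode, then superblock, then sync_blockdev() */
};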
317 asmlinkage long sys_fsync(unsigned int fd)
319 struct file * file;
320 struct address_space *mapping;
321 int ret, err;
323 ret = -EBADF;
324 file = fget(fd);
325 if (!file)
326 goto out;
328 mapping = file->f_mapping;
330 ret = -EINVAL;
331 if (!file->f_op || !file->f_op->fsync) {
332 /* Why? We can still call filemap_fdatawrite */
333 goto out_putf;
336 /* We need to protect against concurrent writers.. */
337 down(&mapping->host->i_sem);
338 current->flags |= PF_SYNCWRITE;
339 ret = filemap_fdatawrite(mapping);
340 err = file->f_op->fsync(file, file->f_dentry, 0);
341 if (!ret)
342 ret = err;
343 err = filemap_fdatawait(mapping);
344 if (!ret)
345 ret = err;
346 current->flags &= ~PF_SYNCWRITE;
347 up(&mapping->host->i_sem);
349 out_putf:
350 fput(file);
351 out:
352 return ret;
355 asmlinkage long sys_fdatasync(unsigned int fd)
357 struct file * file;
358 struct address_space *mapping;
359 int ret, err;
361 ret = -EBADF;
362 file = fget(fd);
363 if (!file)
364 goto out;
366 ret = -EINVAL;
367 if (!file->f_op || !file->f_op->fsync)
368 goto out_putf;
370 mapping = file->f_mapping;
372 down(&mapping->host->i_sem);
373 current->flags |= PF_SYNCWRITE;
374 ret = filemap_fdatawrite(mapping);
375 err = file->f_op->fsync(file, file->f_dentry, 1);
376 if (!ret)
377 ret = err;
378 err = filemap_fdatawait(mapping);
379 if (!ret)
380 ret = err;
381 current->flags &= ~PF_SYNCWRITE;
382 up(&mapping->host->i_sem);
384 out_putf:
385 fput(file);
386 out:
387 return ret;
391 * Various filesystems appear to want __find_get_block to be non-blocking.
392 * But it's the page lock which protects the buffers. To get around this,
393 * we get exclusion from try_to_free_buffers with the blockdev mapping's
394 * private_lock.
396 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
397 * may be quite high. This code could TryLock the page, and if that
398 * succeeds, there is no need to take private_lock. (But if
399 * private_lock is contended then so is mapping->page_lock).
401 static struct buffer_head *
402 __find_get_block_slow(struct block_device *bdev, sector_t block, int unused)
404 struct inode *bd_inode = bdev->bd_inode;
405 struct address_space *bd_mapping = bd_inode->i_mapping;
406 struct buffer_head *ret = NULL;
407 unsigned long index;
408 struct buffer_head *bh;
409 struct buffer_head *head;
410 struct page *page;
412 index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
413 page = find_get_page(bd_mapping, index);
414 if (!page)
415 goto out;
417 spin_lock(&bd_mapping->private_lock);
418 if (!page_has_buffers(page))
419 goto out_unlock;
420 head = page_buffers(page);
421 bh = head;
422 do {
423 if (bh->b_blocknr == block) {
424 ret = bh;
425 get_bh(bh);
426 goto out_unlock;
428 bh = bh->b_this_page;
429 } while (bh != head);
430 buffer_error();
431 printk("block=%llu, b_blocknr=%llu\n",
432 (unsigned long long)block, (unsigned long long)bh->b_blocknr);
433 printk("b_state=0x%08lx, b_size=%u\n", bh->b_state, bh->b_size);
434 out_unlock:
435 spin_unlock(&bd_mapping->private_lock);
436 page_cache_release(page);
437 out:
438 return ret;
441 /* If invalidate_buffers() will trash dirty buffers, it means some kind
442 of fs corruption is going on. Trashing dirty data always implies losing
443 information that was supposed to be just stored on the physical layer
444 by the user.
446 Thus invalidate_buffers in general usage is not allowed to trash
447 dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
448 be preserved. These buffers are simply skipped.
450 We also skip buffers which are still in use. For example this can
451 happen if a userspace program is reading the block device.
453 NOTE: in the case where the user removed a removable-media disk while
454 there was still dirty data not synced to disk (due to a bug in the device
455 driver or to an error by the user), not destroying the dirty buffers could
456 also corrupt the next media inserted; thus a parameter is
457 necessary to handle this case in the safest way possible (trying
458 not to corrupt the newly inserted disk with data belonging to
459 the old, now-corrupted disk). Also, for the ramdisk the natural thing
460 to do in order to release the ramdisk memory is to destroy dirty buffers.
462 These are two special cases. Normal usage implies that the device driver
463 issues a sync on the device (without waiting for I/O completion) and
464 then an invalidate_buffers call that doesn't trash dirty buffers.
466 For handling cache coherency with the blkdev pagecache, the 'update' case
467 has been introduced. It is needed to re-read from disk any pinned
468 buffer. NOTE: re-reading from disk is destructive so we can do it only
469 when we assume nobody is changing the buffercache under our I/O and when
470 we think the disk contains more recent information than the buffercache.
471 The update == 1 pass marks the buffers we need to update, the update == 2
472 pass does the actual I/O. */
473 void invalidate_bdev(struct block_device *bdev, int destroy_dirty_buffers)
475 invalidate_bh_lrus();
477 * FIXME: what about destroy_dirty_buffers?
478 * We really want to use invalidate_inode_pages2() for
479 * that, but not until that's cleaned up.
481 invalidate_inode_pages(bdev->bd_inode->i_mapping);
485 * Kick pdflush then try to free up some ZONE_NORMAL memory.
487 static void free_more_memory(void)
489 struct zone **zones;
490 pg_data_t *pgdat;
492 wakeup_bdflush(1024);
493 blk_run_queues();
494 yield();
496 for_each_pgdat(pgdat) {
497 zones = pgdat->node_zonelists[GFP_NOFS&GFP_ZONEMASK].zones;
498 if (*zones)
499 try_to_free_pages(zones, GFP_NOFS, 0);
504 * I/O completion handler for block_read_full_page() - pages
505 * which come unlocked at the end of I/O.
507 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
509 static spinlock_t page_uptodate_lock = SPIN_LOCK_UNLOCKED;
510 unsigned long flags;
511 struct buffer_head *tmp;
512 struct page *page;
513 int page_uptodate = 1;
515 BUG_ON(!buffer_async_read(bh));
517 page = bh->b_page;
518 if (uptodate) {
519 set_buffer_uptodate(bh);
520 } else {
521 clear_buffer_uptodate(bh);
522 buffer_io_error(bh);
523 SetPageError(page);
527 * Be _very_ careful from here on. Bad things can happen if
528 * two buffer heads end IO at almost the same time and both
529 * decide that the page is now completely done.
531 spin_lock_irqsave(&page_uptodate_lock, flags);
532 clear_buffer_async_read(bh);
533 unlock_buffer(bh);
534 tmp = bh;
535 do {
536 if (!buffer_uptodate(tmp))
537 page_uptodate = 0;
538 if (buffer_async_read(tmp)) {
539 BUG_ON(!buffer_locked(tmp));
540 goto still_busy;
542 tmp = tmp->b_this_page;
543 } while (tmp != bh);
544 spin_unlock_irqrestore(&page_uptodate_lock, flags);
547 * If none of the buffers had errors and they are all
548 * uptodate then we can set the page uptodate.
550 if (page_uptodate && !PageError(page))
551 SetPageUptodate(page);
552 unlock_page(page);
553 return;
555 still_busy:
556 spin_unlock_irqrestore(&page_uptodate_lock, flags);
557 return;
561 * Completion handler for block_write_full_page() - pages which are unlocked
562 * during I/O, and which have PageWriteback cleared upon I/O completion.
564 void end_buffer_async_write(struct buffer_head *bh, int uptodate)
566 char b[BDEVNAME_SIZE];
567 static spinlock_t page_uptodate_lock = SPIN_LOCK_UNLOCKED;
568 unsigned long flags;
569 struct buffer_head *tmp;
570 struct page *page;
572 BUG_ON(!buffer_async_write(bh));
574 page = bh->b_page;
575 if (uptodate) {
576 set_buffer_uptodate(bh);
577 } else {
578 if (printk_ratelimit()) {
579 buffer_io_error(bh);
580 printk(KERN_WARNING "lost page write due to "
581 "I/O error on %s\n",
582 bdevname(bh->b_bdev, b));
584 set_bit(AS_EIO, &page->mapping->flags);
585 clear_buffer_uptodate(bh);
586 SetPageError(page);
589 spin_lock_irqsave(&page_uptodate_lock, flags);
590 clear_buffer_async_write(bh);
591 unlock_buffer(bh);
592 tmp = bh->b_this_page;
593 while (tmp != bh) {
594 if (buffer_async_write(tmp)) {
595 BUG_ON(!buffer_locked(tmp));
596 goto still_busy;
598 tmp = tmp->b_this_page;
600 spin_unlock_irqrestore(&page_uptodate_lock, flags);
601 end_page_writeback(page);
602 return;
604 still_busy:
605 spin_unlock_irqrestore(&page_uptodate_lock, flags);
606 return;
610 * If a page's buffers are under async readin (end_buffer_async_read
611 * completion) then there is a possibility that another thread of
612 * control could lock one of the buffers after it has completed
613 * but while some of the other buffers have not completed. This
614 * locked buffer would confuse end_buffer_async_read() into not unlocking
615 * the page. So the absence of BH_Async_Read tells end_buffer_async_read()
616 * that this buffer is not under async I/O.
618 * The page comes unlocked when it has no locked buffer_async buffers
619 * left.
621 * PageLocked prevents anyone starting new async I/O reads any of
622 * the buffers.
624 * PageWriteback is used to prevent simultaneous writeout of the same
625 * page.
627 * PageLocked prevents anyone from starting writeback of a page which is
628 * under read I/O (PageWriteback is only ever set against a locked page).
630 void mark_buffer_async_read(struct buffer_head *bh)
632 bh->b_end_io = end_buffer_async_read;
633 set_buffer_async_read(bh);
635 EXPORT_SYMBOL(mark_buffer_async_read);
637 void mark_buffer_async_write(struct buffer_head *bh)
639 bh->b_end_io = end_buffer_async_write;
640 set_buffer_async_write(bh);
642 EXPORT_SYMBOL(mark_buffer_async_write);
646 * fs/buffer.c contains helper functions for buffer-backed address space's
647 * fsync functions. A common requirement for buffer-based filesystems is
648 * that certain data from the backing blockdev needs to be written out for
649 * a successful fsync(). For example, ext2 indirect blocks need to be
650 * written back and waited upon before fsync() returns.
652 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
653 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
654 * management of a list of dependent buffers at ->i_mapping->private_list.
656 * Locking is a little subtle: try_to_free_buffers() will remove buffers
657 * from their controlling inode's queue when they are being freed. But
658 * try_to_free_buffers() will be operating against the *blockdev* mapping
659 * at the time, not against the S_ISREG file which depends on those buffers.
660 * So the locking for private_list is via the private_lock in the address_space
661 * which backs the buffers. Which is different from the address_space
662 * against which the buffers are listed. So for a particular address_space,
663 * mapping->private_lock does *not* protect mapping->private_list! In fact,
664 * mapping->private_list will always be protected by the backing blockdev's
665 * ->private_lock.
667 * Which introduces a requirement: all buffers on an address_space's
668 * ->private_list must be from the same address_space: the blockdev's.
670 * address_spaces which do not place buffers at ->private_list via these
671 * utility functions are free to use private_lock and private_list for
672 * whatever they want. The only requirement is that list_empty(private_list)
673 * be true at clear_inode() time.
675 * FIXME: clear_inode should not call invalidate_inode_buffers(). The
676 * filesystems should do that. invalidate_inode_buffers() should just go
677 * BUG_ON(!list_empty).
679 * FIXME: mark_buffer_dirty_inode() is a data-plane operation. It should
680 * take an address_space, not an inode. And it should be called
681 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
682 * queued up.
684 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
685 * list if it is already on a list. Because if the buffer is on a list,
686 * it *must* already be on the right one. If not, the filesystem is being
687 * silly. This will save a ton of locking. But first we have to ensure
688 * that buffers are taken *off* the old inode's list when they are freed
689 * (presumably in truncate). That requires careful auditing of all
690 * filesystems (do it inside bforget()). It could also be done by bringing
691 * b_inode back.
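/*
 * Illustrative sketch of the interface described above: a filesystem queues
 * a dependent blockdev buffer (an indirect block, say) against the inode
 * that needs it written for fsync(), then flushes the list from its ->fsync.
 * The myfs_* names are hypothetical.
 */
static void myfs_dirty_indirect(struct inode *inode, struct buffer_head *bh)
{
	/* dirties bh and queues it on inode->i_mapping->private_list */
	mark_buffer_dirty_inode(bh, inode);
}

static int myfs_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	/* write out and wait upon everything on ->private_list */
	return sync_mapping_buffers(dentry->d_inode->i_mapping);
}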
694 void buffer_insert_list(spinlock_t *lock,
695 struct buffer_head *bh, struct list_head *list)
697 spin_lock(lock);
698 list_move_tail(&bh->b_assoc_buffers, list);
699 spin_unlock(lock);
703 * The buffer's backing address_space's private_lock must be held
705 static inline void __remove_assoc_queue(struct buffer_head *bh)
707 list_del_init(&bh->b_assoc_buffers);
710 int inode_has_buffers(struct inode *inode)
712 return !list_empty(&inode->i_data.private_list);
716 * osync is designed to support O_SYNC io. It waits synchronously for
717 * all already-submitted IO to complete, but does not queue any new
718 * writes to the disk.
720 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
721 * you dirty the buffers, and then use osync_inode_buffers to wait for
722 * completion. Any other dirty buffers which are not yet queued for
723 * write will not be flushed to disk by the osync.
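/*
 * Illustrative sketch of the O_SYNC pattern described above: queue the write
 * as soon as the buffer is dirtied, then wait for completion afterwards.
 * myfs_write_block_sync() is a hypothetical helper.
 */
static int myfs_write_block_sync(struct buffer_head *bh)
{
	mark_buffer_dirty(bh);
	ll_rw_block(WRITE, 1, &bh);	/* start the write now... */
	wait_on_buffer(bh);		/* ...wait for it to finish later */
	return buffer_uptodate(bh) ? 0 : -EIO;
}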
725 static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
727 struct buffer_head *bh;
728 struct list_head *p;
729 int err = 0;
731 spin_lock(lock);
732 repeat:
733 list_for_each_prev(p, list) {
734 bh = BH_ENTRY(p);
735 if (buffer_locked(bh)) {
736 get_bh(bh);
737 spin_unlock(lock);
738 wait_on_buffer(bh);
739 if (!buffer_uptodate(bh))
740 err = -EIO;
741 brelse(bh);
742 spin_lock(lock);
743 goto repeat;
746 spin_unlock(lock);
747 return err;
751 * sync_mapping_buffers - write out and wait upon a mapping's "associated"
752 * buffers
753 * @buffer_mapping - the mapping which backs the buffers' data
754 * @mapping - the mapping which wants those buffers written
756 * Starts I/O against the buffers at mapping->private_list, and waits upon
757 * that I/O.
759 * Basically, this is a convenience function for fsync(). @buffer_mapping is
760 * the blockdev which "owns" the buffers and @mapping is a file or directory
761 * which needs those buffers to be written for a successful fsync().
763 int sync_mapping_buffers(struct address_space *mapping)
765 struct address_space *buffer_mapping = mapping->assoc_mapping;
767 if (buffer_mapping == NULL || list_empty(&mapping->private_list))
768 return 0;
770 return fsync_buffers_list(&buffer_mapping->private_lock,
771 &mapping->private_list);
773 EXPORT_SYMBOL(sync_mapping_buffers);
776 * Called when we've recently written block `bblock', and it is known that
777 * `bblock' was for a buffer_boundary() buffer. This means that the block at
778 * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's
779 * dirty, schedule it for IO. So that indirects merge nicely with their data.
781 void write_boundary_block(struct block_device *bdev,
782 sector_t bblock, unsigned blocksize)
784 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
785 if (bh) {
786 if (buffer_dirty(bh))
787 ll_rw_block(WRITE, 1, &bh);
788 put_bh(bh);
792 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
794 struct address_space *mapping = inode->i_mapping;
795 struct address_space *buffer_mapping = bh->b_page->mapping;
797 mark_buffer_dirty(bh);
798 if (!mapping->assoc_mapping) {
799 mapping->assoc_mapping = buffer_mapping;
800 } else {
801 if (mapping->assoc_mapping != buffer_mapping)
802 BUG();
804 if (list_empty(&bh->b_assoc_buffers))
805 buffer_insert_list(&buffer_mapping->private_lock,
806 bh, &mapping->private_list);
808 EXPORT_SYMBOL(mark_buffer_dirty_inode);
811 * Add a page to the dirty page list.
813 * It is a sad fact of life that this function is called from several places
814 * deeply under spinlocking. It may not sleep.
816 * If the page has buffers, the uptodate buffers are set dirty, to preserve
817 * dirty-state coherency between the page and the buffers. If the page does
818 * not have buffers then when they are later attached they will all be set
819 * dirty.
821 * The buffers are dirtied before the page is dirtied. There's a small race
822 * window in which a writepage caller may see the page cleanness but not the
823 * buffer dirtiness. That's fine. If this code were to set the page dirty
824 * before the buffers, a concurrent writepage caller could clear the page dirty
825 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
826 * page on the dirty page list.
828 * There is also a small window where the page is dirty but not yet on dirty_pages,
829 * and a possibility that by the time the page is added to dirty_pages, it has
830 * been set clean. The page lists are somewhat approximate in this regard.
831 * It's better to have clean pages accidentally attached to dirty_pages than to
832 * leave dirty pages attached to clean_pages.
834 * We use private_lock to lock against try_to_free_buffers while using the
835 * page's buffer list. Also use this to protect against clean buffers being
836 * added to the page after it was set dirty.
838 * FIXME: may need to call ->reservepage here as well. That's rather up to the
839 * address_space though.
841 * For now, we treat swapper_space specially. It doesn't use the normal
842 * block a_ops.
844 int __set_page_dirty_buffers(struct page *page)
846 struct address_space * const mapping = page->mapping;
847 int ret = 0;
849 if (mapping == NULL) {
850 SetPageDirty(page);
851 goto out;
854 spin_lock(&mapping->private_lock);
855 if (page_has_buffers(page)) {
856 struct buffer_head *head = page_buffers(page);
857 struct buffer_head *bh = head;
859 do {
860 if (buffer_uptodate(bh))
861 set_buffer_dirty(bh);
862 else
863 buffer_error();
864 bh = bh->b_this_page;
865 } while (bh != head);
867 spin_unlock(&mapping->private_lock);
869 if (!TestSetPageDirty(page)) {
870 spin_lock(&mapping->page_lock);
871 if (page->mapping) { /* Race with truncate? */
872 if (!mapping->backing_dev_info->memory_backed)
873 inc_page_state(nr_dirty);
874 list_del(&page->list);
875 list_add(&page->list, &mapping->dirty_pages);
877 spin_unlock(&mapping->page_lock);
878 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
881 out:
882 return ret;
884 EXPORT_SYMBOL(__set_page_dirty_buffers);
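/*
 * Illustrative sketch: a buffer-backed filesystem would typically point its
 * ->set_page_dirty address_space operation at the helper above.  The
 * myfs_dirty_aops name is hypothetical.
 */
static struct address_space_operations myfs_dirty_aops = {
	.set_page_dirty	= __set_page_dirty_buffers,
};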
887 * Write out and wait upon a list of buffers.
889 * We have conflicting pressures: we want to make sure that all
890 * initially dirty buffers get waited on, but that any subsequently
891 * dirtied buffers don't. After all, we don't want fsync to last
892 * forever if somebody is actively writing to the file.
894 * Do this in two main stages: first we copy dirty buffers to a
895 * temporary inode list, queueing the writes as we go. Then we clean
896 * up, waiting for those writes to complete.
898 * During this second stage, any subsequent updates to the file may end
899 * up refiling the buffer on the original inode's dirty list again, so
900 * there is a chance we will end up with a buffer queued for write but
901 * not yet completed on that list. So, as a final cleanup we go through
902 * the osync code to catch these locked, dirty buffers without requeuing
903 * any newly dirty buffers for write.
905 int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
907 struct buffer_head *bh;
908 struct list_head tmp;
909 int err = 0, err2;
911 INIT_LIST_HEAD(&tmp);
913 spin_lock(lock);
914 while (!list_empty(list)) {
915 bh = BH_ENTRY(list->next);
916 list_del_init(&bh->b_assoc_buffers);
917 if (buffer_dirty(bh) || buffer_locked(bh)) {
918 list_add(&bh->b_assoc_buffers, &tmp);
919 if (buffer_dirty(bh)) {
920 get_bh(bh);
921 spin_unlock(lock);
923 * Ensure any pending I/O completes so that
924 * ll_rw_block() actually writes the current
925 * contents - it is a noop if I/O is still in
926 * flight on potentially older contents.
928 wait_on_buffer(bh);
929 ll_rw_block(WRITE, 1, &bh);
930 brelse(bh);
931 spin_lock(lock);
936 while (!list_empty(&tmp)) {
937 bh = BH_ENTRY(tmp.prev);
938 __remove_assoc_queue(bh);
939 get_bh(bh);
940 spin_unlock(lock);
941 wait_on_buffer(bh);
942 if (!buffer_uptodate(bh))
943 err = -EIO;
944 brelse(bh);
945 spin_lock(lock);
948 spin_unlock(lock);
949 err2 = osync_buffers_list(lock, list);
950 if (err)
951 return err;
952 else
953 return err2;
957 * Invalidate any and all dirty buffers on a given inode. We are
958 * probably unmounting the fs, but that doesn't mean we have already
959 * done a sync(). Just drop the buffers from the inode list.
961 * NOTE: we take the inode's blockdev's mapping's private_lock. Which
962 * assumes that all the buffers are against the blockdev. Not true
963 * for reiserfs.
965 void invalidate_inode_buffers(struct inode *inode)
967 if (inode_has_buffers(inode)) {
968 struct address_space *mapping = &inode->i_data;
969 struct list_head *list = &mapping->private_list;
970 struct address_space *buffer_mapping = mapping->assoc_mapping;
972 spin_lock(&buffer_mapping->private_lock);
973 while (!list_empty(list))
974 __remove_assoc_queue(BH_ENTRY(list->next));
975 spin_unlock(&buffer_mapping->private_lock);
980 * Remove any clean buffers from the inode's buffer list. This is called
981 * when we're trying to free the inode itself. Those buffers can pin it.
983 * Returns true if all buffers were removed.
985 int remove_inode_buffers(struct inode *inode)
987 int ret = 1;
989 if (inode_has_buffers(inode)) {
990 struct address_space *mapping = &inode->i_data;
991 struct list_head *list = &mapping->private_list;
992 struct address_space *buffer_mapping = mapping->assoc_mapping;
994 spin_lock(&buffer_mapping->private_lock);
995 while (!list_empty(list)) {
996 struct buffer_head *bh = BH_ENTRY(list->next);
997 if (buffer_dirty(bh)) {
998 ret = 0;
999 break;
1001 __remove_assoc_queue(bh);
1003 spin_unlock(&buffer_mapping->private_lock);
1005 return ret;
1009 * Create the appropriate buffers when given a page for data area and
1010 * the size of each buffer.. Use the bh->b_this_page linked list to
1011 * follow the buffers created. Return NULL if unable to create more
1012 * buffers.
1014 * The retry flag is used to differentiate async IO (paging, swapping)
1015 * which may not fail from ordinary buffer allocations.
1017 static struct buffer_head *
1018 create_buffers(struct page * page, unsigned long size, int retry)
1020 struct buffer_head *bh, *head;
1021 long offset;
1023 try_again:
1024 head = NULL;
1025 offset = PAGE_SIZE;
1026 while ((offset -= size) >= 0) {
1027 bh = alloc_buffer_head(GFP_NOFS);
1028 if (!bh)
1029 goto no_grow;
1031 bh->b_bdev = NULL;
1032 bh->b_this_page = head;
1033 bh->b_blocknr = -1;
1034 head = bh;
1036 bh->b_state = 0;
1037 atomic_set(&bh->b_count, 0);
1038 bh->b_size = size;
1040 /* Link the buffer to its page */
1041 set_bh_page(bh, page, offset);
1043 bh->b_end_io = NULL;
1045 return head;
1047 * In case anything failed, we just free everything we got.
1049 no_grow:
1050 if (head) {
1051 do {
1052 bh = head;
1053 head = head->b_this_page;
1054 free_buffer_head(bh);
1055 } while (head);
1059 * Return failure for non-async IO requests. Async IO requests
1060 * are not allowed to fail, so we have to wait until buffer heads
1061 * become available. But we don't want tasks sleeping with
1062 * partially complete buffers, so all were released above.
1064 if (!retry)
1065 return NULL;
1067 /* We're _really_ low on memory. Now we just
1068 * wait for old buffer heads to become free due to
1069 * finishing IO. Since this is an async request and
1070 * the reserve list is empty, we're sure there are
1071 * async buffer heads in use.
1073 free_more_memory();
1074 goto try_again;
1077 static inline void
1078 link_dev_buffers(struct page *page, struct buffer_head *head)
1080 struct buffer_head *bh, *tail;
1082 bh = head;
1083 do {
1084 tail = bh;
1085 bh = bh->b_this_page;
1086 } while (bh);
1087 tail->b_this_page = head;
1088 __set_page_buffers(page, head);
1092 * Initialise the state of a blockdev page's buffers.
1094 static void
1095 init_page_buffers(struct page *page, struct block_device *bdev,
1096 int block, int size)
1098 struct buffer_head *head = page_buffers(page);
1099 struct buffer_head *bh = head;
1100 unsigned int b_state;
1102 b_state = 1 << BH_Mapped;
1103 if (PageUptodate(page))
1104 b_state |= 1 << BH_Uptodate;
1106 do {
1107 if (!(bh->b_state & (1 << BH_Mapped))) {
1108 init_buffer(bh, NULL, NULL);
1109 bh->b_bdev = bdev;
1110 bh->b_blocknr = block;
1111 bh->b_state = b_state;
1113 block++;
1114 bh = bh->b_this_page;
1115 } while (bh != head);
1119 * Create the page-cache page that contains the requested block.
1121 * This is used purely for blockdev mappings.
1123 static struct page *
1124 grow_dev_page(struct block_device *bdev, unsigned long block,
1125 unsigned long index, int size)
1127 struct inode *inode = bdev->bd_inode;
1128 struct page *page;
1129 struct buffer_head *bh;
1131 page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
1132 if (!page)
1133 return NULL;
1135 if (!PageLocked(page))
1136 BUG();
1138 if (page_has_buffers(page)) {
1139 bh = page_buffers(page);
1140 if (bh->b_size == size)
1141 return page;
1142 if (!try_to_free_buffers(page))
1143 goto failed;
1147 * Allocate some buffers for this page
1149 bh = create_buffers(page, size, 0);
1150 if (!bh)
1151 goto failed;
1154 * Link the page to the buffers and initialise them. Take the
1155 * lock to be atomic wrt __find_get_block(), which does not
1156 * run under the page lock.
1158 spin_lock(&inode->i_mapping->private_lock);
1159 link_dev_buffers(page, bh);
1160 init_page_buffers(page, bdev, block, size);
1161 spin_unlock(&inode->i_mapping->private_lock);
1162 return page;
1164 failed:
1165 buffer_error();
1166 unlock_page(page);
1167 page_cache_release(page);
1168 return NULL;
1172 * Create buffers for the specified block device block's page. If
1173 * that page was dirty, the buffers are set dirty also.
1175 * Except that's a bug. Attaching dirty buffers to a dirty
1176 * blockdev's page can result in filesystem corruption, because
1177 * some of those buffers may be aliases of filesystem data.
1178 * grow_dev_page() will go BUG() if this happens.
1180 static inline int
1181 grow_buffers(struct block_device *bdev, unsigned long block, int size)
1183 struct page *page;
1184 unsigned long index;
1185 int sizebits;
1187 /* Size must be multiple of hard sectorsize */
1188 if (size & (bdev_hardsect_size(bdev)-1))
1189 BUG();
1190 if (size < 512 || size > PAGE_SIZE)
1191 BUG();
1193 sizebits = -1;
1194 do {
1195 sizebits++;
1196 } while ((size << sizebits) < PAGE_SIZE);
1198 index = block >> sizebits;
1199 block = index << sizebits;
1201 /* Create a page with the proper size buffers.. */
1202 page = grow_dev_page(bdev, block, index, size);
1203 if (!page)
1204 return 0;
1205 unlock_page(page);
1206 page_cache_release(page);
1207 return 1;
1210 struct buffer_head *
1211 __getblk_slow(struct block_device *bdev, sector_t block, int size)
1213 for (;;) {
1214 struct buffer_head * bh;
1216 bh = __find_get_block(bdev, block, size);
1217 if (bh)
1218 return bh;
1220 if (!grow_buffers(bdev, block, size))
1221 free_more_memory();
1226 * The relationship between dirty buffers and dirty pages:
1228 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1229 * the page appears on its address_space.dirty_pages list.
1231 * At all times, the dirtiness of the buffers represents the dirtiness of
1232 * subsections of the page. If the page has buffers, the page dirty bit is
1233 * merely a hint about the true dirty state.
1235 * When a page is set dirty in its entirety, all its buffers are marked dirty
1236 * (if the page has buffers).
1238 * When a buffer is marked dirty, its page is dirtied, but the page's other
1239 * buffers are not.
1241 * Also. When blockdev buffers are explicitly read with bread(), they
1242 * individually become uptodate. But their backing page remains not
1243 * uptodate - even if all of its buffers are uptodate. A subsequent
1244 * block_read_full_page() against that page will discover all the uptodate
1245 * buffers, will set the page uptodate and will perform no I/O.
1249 * mark_buffer_dirty - mark a buffer_head as needing writeout
1251 * mark_buffer_dirty() will set the dirty bit against the buffer,
1252 * then set its backing page dirty, then attach the page to its
1253 * address_space's dirty_pages list and then attach the address_space's
1254 * inode to its superblock's dirty inode list.
1256 * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
1257 * mapping->page_lock and the global inode_lock.
1259 void mark_buffer_dirty(struct buffer_head *bh)
1261 if (!buffer_uptodate(bh))
1262 buffer_error();
1263 if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
1264 __set_page_dirty_nobuffers(bh->b_page);
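/*
 * Illustrative sketch: the usual read / modify / mark_buffer_dirty pattern
 * for blockdev-backed metadata.  myfs_bump_counter() and its on-disk layout
 * are hypothetical.
 */
static int myfs_bump_counter(struct super_block *sb, sector_t block)
{
	struct buffer_head *bh = sb_bread(sb, block);
	u32 *p;

	if (!bh)
		return -EIO;
	p = (u32 *)bh->b_data;
	*p = cpu_to_le32(le32_to_cpu(*p) + 1);	/* modify the in-core copy */
	mark_buffer_dirty(bh);			/* buffer, page and inode get dirtied */
	brelse(bh);
	return 0;
}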
1268 * Decrement a buffer_head's reference count. If all buffers against a page
1269 * have zero reference count, are clean and unlocked, and if the page is clean
1270 * and unlocked then try_to_free_buffers() may strip the buffers from the page
1271 * in preparation for freeing it (sometimes, rarely, buffers are removed from
1272 * a page but it ends up not being freed, and buffers may later be reattached).
1274 void __brelse(struct buffer_head * buf)
1276 if (atomic_read(&buf->b_count)) {
1277 put_bh(buf);
1278 return;
1280 printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1281 buffer_error(); /* For the stack backtrace */
1285 * bforget() is like brelse(), except it discards any
1286 * potentially dirty data.
1288 void __bforget(struct buffer_head *bh)
1290 clear_buffer_dirty(bh);
1291 if (!list_empty(&bh->b_assoc_buffers)) {
1292 struct address_space *buffer_mapping = bh->b_page->mapping;
1294 spin_lock(&buffer_mapping->private_lock);
1295 list_del_init(&bh->b_assoc_buffers);
1296 spin_unlock(&buffer_mapping->private_lock);
1298 __brelse(bh);
1301 static struct buffer_head *__bread_slow(struct buffer_head *bh)
1303 lock_buffer(bh);
1304 if (buffer_uptodate(bh)) {
1305 unlock_buffer(bh);
1306 return bh;
1307 } else {
1308 if (buffer_dirty(bh))
1309 buffer_error();
1310 get_bh(bh);
1311 bh->b_end_io = end_buffer_read_sync;
1312 submit_bh(READ, bh);
1313 wait_on_buffer(bh);
1314 if (buffer_uptodate(bh))
1315 return bh;
1317 brelse(bh);
1318 return NULL;
1322 * Per-cpu buffer LRU implementation, to reduce the cost of __find_get_block().
1323 * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their
1324 * refcount elevated by one when they're in an LRU. A buffer can only appear
1325 * once in a particular CPU's LRU. A single buffer can be present in multiple
1326 * CPU's LRUs at the same time.
1328 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1329 * sb_find_get_block().
1331 * The LRUs themselves only need locking against invalidate_bh_lrus. We use
1332 * a local interrupt disable for that.
1335 #define BH_LRU_SIZE 8
1337 struct bh_lru {
1338 struct buffer_head *bhs[BH_LRU_SIZE];
1341 static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{0}};
1343 #ifdef CONFIG_SMP
1344 #define bh_lru_lock() local_irq_disable()
1345 #define bh_lru_unlock() local_irq_enable()
1346 #else
1347 #define bh_lru_lock() preempt_disable()
1348 #define bh_lru_unlock() preempt_enable()
1349 #endif
1351 static inline void check_irqs_on(void)
1353 #ifdef irqs_disabled
1354 BUG_ON(irqs_disabled());
1355 #endif
1359 * The LRU management algorithm is dopey-but-simple. Sorry.
1361 static void bh_lru_install(struct buffer_head *bh)
1363 struct buffer_head *evictee = NULL;
1364 struct bh_lru *lru;
1366 check_irqs_on();
1367 bh_lru_lock();
1368 lru = &__get_cpu_var(bh_lrus);
1369 if (lru->bhs[0] != bh) {
1370 struct buffer_head *bhs[BH_LRU_SIZE];
1371 int in;
1372 int out = 0;
1374 get_bh(bh);
1375 bhs[out++] = bh;
1376 for (in = 0; in < BH_LRU_SIZE; in++) {
1377 struct buffer_head *bh2 = lru->bhs[in];
1379 if (bh2 == bh) {
1380 __brelse(bh2);
1381 } else {
1382 if (out >= BH_LRU_SIZE) {
1383 BUG_ON(evictee != NULL);
1384 evictee = bh2;
1385 } else {
1386 bhs[out++] = bh2;
1390 while (out < BH_LRU_SIZE)
1391 bhs[out++] = NULL;
1392 memcpy(lru->bhs, bhs, sizeof(bhs));
1394 bh_lru_unlock();
1396 if (evictee)
1397 __brelse(evictee);
1401 * Look up the bh in this cpu's LRU. If it's there, move it to the head.
1403 static inline struct buffer_head *
1404 lookup_bh_lru(struct block_device *bdev, sector_t block, int size)
1406 struct buffer_head *ret = NULL;
1407 struct bh_lru *lru;
1408 int i;
1410 check_irqs_on();
1411 bh_lru_lock();
1412 lru = &__get_cpu_var(bh_lrus);
1413 for (i = 0; i < BH_LRU_SIZE; i++) {
1414 struct buffer_head *bh = lru->bhs[i];
1416 if (bh && bh->b_bdev == bdev &&
1417 bh->b_blocknr == block && bh->b_size == size) {
1418 if (i) {
1419 while (i) {
1420 lru->bhs[i] = lru->bhs[i - 1];
1421 i--;
1423 lru->bhs[0] = bh;
1425 get_bh(bh);
1426 ret = bh;
1427 break;
1430 bh_lru_unlock();
1431 return ret;
1435 * Perform a pagecache lookup for the matching buffer. If it's there, refresh
1436 * it in the LRU and mark it as accessed. If it is not present then return
1437 * NULL
1439 struct buffer_head *
1440 __find_get_block(struct block_device *bdev, sector_t block, int size)
1442 struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1444 if (bh == NULL) {
1445 bh = __find_get_block_slow(bdev, block, size);
1446 if (bh)
1447 bh_lru_install(bh);
1449 if (bh)
1450 touch_buffer(bh);
1451 return bh;
1453 EXPORT_SYMBOL(__find_get_block);
1456 * __getblk will locate (and, if necessary, create) the buffer_head
1457 * which corresponds to the passed block_device, block and size. The
1458 * returned buffer has its reference count incremented.
1460 * __getblk() cannot fail - it just keeps trying. If you pass it an
1461 * illegal block number, __getblk() will happily return a buffer_head
1462 * which represents the non-existent block. Very weird.
1464 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1465 * attempt is failing. FIXME, perhaps?
1467 struct buffer_head *
1468 __getblk(struct block_device *bdev, sector_t block, int size)
1470 struct buffer_head *bh = __find_get_block(bdev, block, size);
1472 if (bh == NULL)
1473 bh = __getblk_slow(bdev, block, size);
1474 return bh;
1476 EXPORT_SYMBOL(__getblk);
1479 * Do async read-ahead on a buffer..
1481 void __breadahead(struct block_device *bdev, sector_t block, int size)
1483 struct buffer_head *bh = __getblk(bdev, block, size);
1484 ll_rw_block(READA, 1, &bh);
1485 brelse(bh);
1487 EXPORT_SYMBOL(__breadahead);
1490 * __bread() - reads a specified block and returns the bh
1491 * @block: number of block
1492 * @size: size (in bytes) to read
1494 * Reads a specified block, and returns buffer head that contains it.
1495 * It returns NULL if the block was unreadable.
1497 struct buffer_head *
1498 __bread(struct block_device *bdev, sector_t block, int size)
1500 struct buffer_head *bh = __getblk(bdev, block, size);
1502 if (!buffer_uptodate(bh))
1503 bh = __bread_slow(bh);
1504 return bh;
1506 EXPORT_SYMBOL(__bread);
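/*
 * Illustrative sketch: __bread() is for blocks whose current contents are
 * needed; when the whole block is about to be overwritten, __getblk() is
 * enough and saves a read from disk.  myfs_zero_block() is a hypothetical
 * helper.
 */
static int myfs_zero_block(struct block_device *bdev, sector_t block, int size)
{
	struct buffer_head *bh = __getblk(bdev, block, size);

	memset(bh->b_data, 0, size);	/* old contents are irrelevant */
	set_buffer_uptodate(bh);
	mark_buffer_dirty(bh);
	brelse(bh);
	return 0;
}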
1509 * invalidate_bh_lrus() is called rarely - at unmount. Because it is only for
1510 * unmount it only needs to ensure that all buffers from the target device are
1511 * invalidated on return and it doesn't need to worry about new buffers from
1512 * that device being added - the unmount code has to prevent that.
1514 static void invalidate_bh_lru(void *arg)
1516 struct bh_lru *b = &get_cpu_var(bh_lrus);
1517 int i;
1519 for (i = 0; i < BH_LRU_SIZE; i++) {
1520 brelse(b->bhs[i]);
1521 b->bhs[i] = NULL;
1523 put_cpu_var(bh_lrus);
1526 static void invalidate_bh_lrus(void)
1528 on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
1531 void set_bh_page(struct buffer_head *bh,
1532 struct page *page, unsigned long offset)
1534 bh->b_page = page;
1535 if (offset >= PAGE_SIZE)
1536 BUG();
1537 if (PageHighMem(page))
1539 * This catches illegal uses and preserves the offset:
1541 bh->b_data = (char *)(0 + offset);
1542 else
1543 bh->b_data = page_address(page) + offset;
1545 EXPORT_SYMBOL(set_bh_page);
1548 * Called when truncating a buffer on a page completely.
1550 static inline void discard_buffer(struct buffer_head * bh)
1552 lock_buffer(bh);
1553 clear_buffer_dirty(bh);
1554 bh->b_bdev = NULL;
1555 clear_buffer_mapped(bh);
1556 clear_buffer_req(bh);
1557 clear_buffer_new(bh);
1558 clear_buffer_delay(bh);
1559 unlock_buffer(bh);
1563 * try_to_release_page() - release old fs-specific metadata on a page
1565 * @page: the page which the kernel is trying to free
1566 * @gfp_mask: memory allocation flags (and I/O mode)
1568 * The address_space is to try to release any data against the page
1569 * (presumably at page->private). If the release was successful, return `1'.
1570 * Otherwise return zero.
1572 * The @gfp_mask argument specifies whether I/O may be performed to release
1573 * this page (__GFP_IO), and whether the call may block (__GFP_WAIT).
1575 * NOTE: @gfp_mask may go away, and this function may become non-blocking.
1577 int try_to_release_page(struct page *page, int gfp_mask)
1579 struct address_space * const mapping = page->mapping;
1581 if (!PageLocked(page))
1582 BUG();
1583 if (PageWriteback(page))
1584 return 0;
1586 if (mapping && mapping->a_ops->releasepage)
1587 return mapping->a_ops->releasepage(page, gfp_mask);
1588 return try_to_free_buffers(page);
1592 * block_invalidatepage - invalidate part or all of a buffer-backed page
1594 * @page: the page which is affected
1595 * @offset: the index of the truncation point
1597 * block_invalidatepage() is called when all or part of the page has become
1598 * invalidated by a truncate operation.
1600 * block_invalidatepage() does not have to release all buffers, but it must
1601 * ensure that no dirty buffer is left outside @offset and that no I/O
1602 * is underway against any of the blocks which are outside the truncation
1603 * point. Because the caller is about to free (and possibly reuse) those
1604 * blocks on-disk.
1606 int block_invalidatepage(struct page *page, unsigned long offset)
1608 struct buffer_head *head, *bh, *next;
1609 unsigned int curr_off = 0;
1610 int ret = 1;
1612 BUG_ON(!PageLocked(page));
1613 if (!page_has_buffers(page))
1614 goto out;
1616 head = page_buffers(page);
1617 bh = head;
1618 do {
1619 unsigned int next_off = curr_off + bh->b_size;
1620 next = bh->b_this_page;
1623 * is this block fully invalidated?
1625 if (offset <= curr_off)
1626 discard_buffer(bh);
1627 curr_off = next_off;
1628 bh = next;
1629 } while (bh != head);
1632 * We release buffers only if the entire page is being invalidated.
1633 * The get_block cached value has been unconditionally invalidated,
1634 * so real IO is not possible anymore.
1636 if (offset == 0)
1637 ret = try_to_release_page(page, 0);
1638 out:
1639 return ret;
1641 EXPORT_SYMBOL(block_invalidatepage);
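/*
 * Illustrative sketch: a buffer-backed filesystem's own ->invalidatepage
 * method can simply defer to the helper above.  The myfs_invalidatepage
 * wrapper is hypothetical.
 */
static int myfs_invalidatepage(struct page *page, unsigned long offset)
{
	return block_invalidatepage(page, offset);
}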
1644 * We attach and possibly dirty the buffers atomically wrt
1645 * __set_page_dirty_buffers() via private_lock. try_to_free_buffers
1646 * is already excluded via the page lock.
1648 void create_empty_buffers(struct page *page,
1649 unsigned long blocksize, unsigned long b_state)
1651 struct buffer_head *bh, *head, *tail;
1653 head = create_buffers(page, blocksize, 1);
1654 bh = head;
1655 do {
1656 bh->b_state |= b_state;
1657 tail = bh;
1658 bh = bh->b_this_page;
1659 } while (bh);
1660 tail->b_this_page = head;
1662 spin_lock(&page->mapping->private_lock);
1663 if (PageUptodate(page) || PageDirty(page)) {
1664 bh = head;
1665 do {
1666 if (PageDirty(page))
1667 set_buffer_dirty(bh);
1668 if (PageUptodate(page))
1669 set_buffer_uptodate(bh);
1670 bh = bh->b_this_page;
1671 } while (bh != head);
1673 __set_page_buffers(page, head);
1674 spin_unlock(&page->mapping->private_lock);
1676 EXPORT_SYMBOL(create_empty_buffers);
1679 * We are taking a block for data and we don't want any output from any
1680 * buffer-cache aliases starting from the return from this function and
1681 * until the moment when something explicitly marks the buffer
1682 * dirty (hopefully that will not happen until we free that block ;-)
1683 * We don't even need to mark it not-uptodate - nobody can expect
1684 * anything from a newly allocated buffer anyway. We used to use
1685 * unmap_buffer() for such invalidation, but that was wrong. We definitely
1686 * don't want to mark the alias unmapped, for example - it would confuse
1687 * anyone who might pick it with bread() afterwards...
1689 * Also.. Note that bforget() doesn't lock the buffer. So there can
1690 * be writeout I/O going on against recently-freed buffers. We don't
1691 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1692 * only if we really need to. That happens here.
1694 void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1696 struct buffer_head *old_bh;
1698 old_bh = __find_get_block_slow(bdev, block, 0);
1699 if (old_bh) {
1700 #if 0 /* This happens. Later. */
1701 if (buffer_dirty(old_bh))
1702 buffer_error();
1703 #endif
1704 clear_buffer_dirty(old_bh);
1705 wait_on_buffer(old_bh);
1706 clear_buffer_req(old_bh);
1707 __brelse(old_bh);
1710 EXPORT_SYMBOL(unmap_underlying_metadata);
1713 * NOTE! All mapped/uptodate combinations are valid:
1715 *	Mapped	Uptodate	Meaning
1717 *	No	No		"unknown" - must do get_block()
1718 *	No	Yes		"hole" - zero-filled
1719 *	Yes	No		"allocated" - allocated on disk, not read in
1720 *	Yes	Yes		"valid" - allocated and up-to-date in memory.
1722 * "Dirty" is valid only with the last case (mapped+uptodate).
1726 * While block_write_full_page is writing back the dirty buffers under
1727 * the page lock, whoever dirtied the buffers may decide to clean them
1728 * again at any time. We handle that by only looking at the buffer
1729 * state inside lock_buffer().
1731 * If block_write_full_page() is called for regular writeback
1732 * (called_for_sync() is false) then it will redirty a page which has a locked
1733 * buffer. This only can happen if someone has written the buffer directly,
1734 * with submit_bh(). At the address_space level PageWriteback prevents this
1735 * contention from occurring.
1737 static int __block_write_full_page(struct inode *inode, struct page *page,
1738 get_block_t *get_block, struct writeback_control *wbc)
1740 int err;
1741 unsigned long block;
1742 unsigned long last_block;
1743 struct buffer_head *bh, *head;
1744 int nr_underway = 0;
1746 BUG_ON(!PageLocked(page));
1748 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1750 if (!page_has_buffers(page)) {
1751 if (!PageUptodate(page))
1752 buffer_error();
1753 create_empty_buffers(page, 1 << inode->i_blkbits,
1754 (1 << BH_Dirty)|(1 << BH_Uptodate));
1758 * Be very careful. We have no exclusion from __set_page_dirty_buffers
1759 * here, and the (potentially unmapped) buffers may become dirty at
1760 * any time. If a buffer becomes dirty here after we've inspected it
1761 * then we just miss that fact, and the page stays dirty.
1763 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1764 * handle that here by just cleaning them.
1767 block = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1768 head = page_buffers(page);
1769 bh = head;
1772 * Get all the dirty buffers mapped to disk addresses and
1773 * handle any aliases from the underlying blockdev's mapping.
1775 do {
1776 if (block > last_block) {
1778 * mapped buffers outside i_size will occur, because
1779 * this page can be outside i_size when there is a
1780 * truncate in progress.
1782 * if (buffer_mapped(bh))
1783 * buffer_error();
1786 * The buffer was zeroed by block_write_full_page()
1788 clear_buffer_dirty(bh);
1789 set_buffer_uptodate(bh);
1790 } else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
1791 if (buffer_new(bh))
1792 buffer_error();
1793 err = get_block(inode, block, bh, 1);
1794 if (err)
1795 goto recover;
1796 if (buffer_new(bh)) {
1797 /* blockdev mappings never come here */
1798 clear_buffer_new(bh);
1799 unmap_underlying_metadata(bh->b_bdev,
1800 bh->b_blocknr);
1803 bh = bh->b_this_page;
1804 block++;
1805 } while (bh != head);
1807 do {
1808 get_bh(bh);
1809 if (buffer_mapped(bh) && buffer_dirty(bh)) {
1810 if (wbc->sync_mode != WB_SYNC_NONE) {
1811 lock_buffer(bh);
1812 } else {
1813 if (test_set_buffer_locked(bh)) {
1814 __set_page_dirty_nobuffers(page);
1815 continue;
1818 if (test_clear_buffer_dirty(bh)) {
1819 if (!buffer_uptodate(bh))
1820 buffer_error();
1821 mark_buffer_async_write(bh);
1822 } else {
1823 unlock_buffer(bh);
1826 } while ((bh = bh->b_this_page) != head);
1828 BUG_ON(PageWriteback(page));
1829 SetPageWriteback(page); /* Keeps try_to_free_buffers() away */
1830 unlock_page(page);
1833 * The page may come unlocked any time after the *first* submit_bh()
1834 * call. Be careful with its buffers.
1836 do {
1837 struct buffer_head *next = bh->b_this_page;
1838 if (buffer_async_write(bh)) {
1839 submit_bh(WRITE, bh);
1840 nr_underway++;
1842 put_bh(bh);
1843 bh = next;
1844 } while (bh != head);
1846 err = 0;
1847 done:
1848 if (nr_underway == 0) {
1850 * The page was marked dirty, but the buffers were
1851 * clean. Someone wrote them back by hand with
1852 * ll_rw_block/submit_bh. A rare case.
1854 int uptodate = 1;
1855 do {
1856 if (!buffer_uptodate(bh)) {
1857 uptodate = 0;
1858 break;
1860 bh = bh->b_this_page;
1861 } while (bh != head);
1862 if (uptodate)
1863 SetPageUptodate(page);
1864 end_page_writeback(page);
1866 return err;
1868 recover:
1870 * ENOSPC, or some other error. We may already have added some
1871 * blocks to the file, so we need to write these out to avoid
1872 * exposing stale data.
1873 * The page is currently locked and not marked for writeback
1875 bh = head;
1876 /* Recovery: lock and submit the mapped buffers */
1877 do {
1878 get_bh(bh);
1879 if (buffer_mapped(bh) && buffer_dirty(bh)) {
1880 lock_buffer(bh);
1881 mark_buffer_async_write(bh);
1882 } else {
1884 * The buffer may have been set dirty during
1885 * attachment to a dirty page.
1887 clear_buffer_dirty(bh);
1889 } while ((bh = bh->b_this_page) != head);
1890 SetPageError(page);
1891 BUG_ON(PageWriteback(page));
1892 SetPageWriteback(page);
1893 unlock_page(page);
1894 do {
1895 struct buffer_head *next = bh->b_this_page;
1896 if (buffer_async_write(bh)) {
1897 clear_buffer_dirty(bh);
1898 submit_bh(WRITE, bh);
1899 nr_underway++;
1901 put_bh(bh);
1902 bh = next;
1903 } while (bh != head);
1904 goto done;
1907 static int __block_prepare_write(struct inode *inode, struct page *page,
1908 unsigned from, unsigned to, get_block_t *get_block)
1910 unsigned block_start, block_end;
1911 sector_t block;
1912 int err = 0;
1913 unsigned blocksize, bbits;
1914 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1916 BUG_ON(!PageLocked(page));
1917 BUG_ON(from > PAGE_CACHE_SIZE);
1918 BUG_ON(to > PAGE_CACHE_SIZE);
1919 BUG_ON(from > to);
1921 blocksize = 1 << inode->i_blkbits;
1922 if (!page_has_buffers(page))
1923 create_empty_buffers(page, blocksize, 0);
1924 head = page_buffers(page);
1926 bbits = inode->i_blkbits;
1927 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1929 for(bh = head, block_start = 0; bh != head || !block_start;
1930 block++, block_start=block_end, bh = bh->b_this_page) {
1931 block_end = block_start + blocksize;
1932 if (block_end <= from || block_start >= to) {
1933 if (PageUptodate(page)) {
1934 if (!buffer_uptodate(bh))
1935 set_buffer_uptodate(bh);
1937 continue;
1939 if (buffer_new(bh))
1940 clear_buffer_new(bh);
1941 if (!buffer_mapped(bh)) {
1942 err = get_block(inode, block, bh, 1);
1943 if (err)
1944 goto out;
1945 if (buffer_new(bh)) {
1946 clear_buffer_new(bh);
1947 unmap_underlying_metadata(bh->b_bdev,
1948 bh->b_blocknr);
1949 if (PageUptodate(page)) {
1950 if (!buffer_mapped(bh))
1951 buffer_error();
1952 set_buffer_uptodate(bh);
1953 continue;
1955 if (block_end > to || block_start < from) {
1956 void *kaddr;
1958 kaddr = kmap_atomic(page, KM_USER0);
1959 if (block_end > to)
1960 memset(kaddr+to, 0,
1961 block_end-to);
1962 if (block_start < from)
1963 memset(kaddr+block_start,
1964 0, from-block_start);
1965 flush_dcache_page(page);
1966 kunmap_atomic(kaddr, KM_USER0);
1968 continue;
1971 if (PageUptodate(page)) {
1972 if (!buffer_uptodate(bh))
1973 set_buffer_uptodate(bh);
1974 continue;
1976 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1977 (block_start < from || block_end > to)) {
1978 ll_rw_block(READ, 1, &bh);
1979 *wait_bh++=bh;
1983 * If we issued read requests - let them complete.
1985 while(wait_bh > wait) {
1986 wait_on_buffer(*--wait_bh);
1987 if (!buffer_uptodate(*wait_bh))
1988 return -EIO;
1990 return 0;
1991 out:
1993 * Zero out any newly allocated blocks to avoid exposing stale
1994 * data. If BH_New is set, we know that the block was newly
1995 * allocated in the above loop.
1997 bh = head;
1998 block_start = 0;
1999 do {
2000 block_end = block_start+blocksize;
2001 if (block_end <= from)
2002 goto next_bh;
2003 if (block_start >= to)
2004 break;
2005 if (buffer_new(bh)) {
2006 void *kaddr;
2008 clear_buffer_new(bh);
2009 if (buffer_uptodate(bh))
2010 buffer_error();
2011 kaddr = kmap_atomic(page, KM_USER0);
2012 memset(kaddr+block_start, 0, bh->b_size);
2013 kunmap_atomic(kaddr, KM_USER0);
2014 set_buffer_uptodate(bh);
2015 mark_buffer_dirty(bh);
2017 next_bh:
2018 block_start = block_end;
2019 bh = bh->b_this_page;
2020 } while (bh != head);
2021 return err;
2024 static int __block_commit_write(struct inode *inode, struct page *page,
2025 unsigned from, unsigned to)
2027 unsigned block_start, block_end;
2028 int partial = 0;
2029 unsigned blocksize;
2030 struct buffer_head *bh, *head;
2032 blocksize = 1 << inode->i_blkbits;
2034 for(bh = head = page_buffers(page), block_start = 0;
2035 bh != head || !block_start;
2036 block_start=block_end, bh = bh->b_this_page) {
2037 block_end = block_start + blocksize;
2038 if (block_end <= from || block_start >= to) {
2039 if (!buffer_uptodate(bh))
2040 partial = 1;
2041 } else {
2042 set_buffer_uptodate(bh);
2043 mark_buffer_dirty(bh);
2047         /*
2048          * If this is a partial write which happened to make all buffers
2049          * uptodate then we can optimize away a bogus readpage() for
2050          * the next read(). Here we 'discover' whether the page went
2051          * uptodate as a result of this (potentially partial) write.
2052          */
2053 if (!partial)
2054 SetPageUptodate(page);
2055 return 0;
2058 /*
2059  * Generic "read page" function for block devices that have the normal
2060  * get_block functionality. This is most of the block device filesystems.
2061  * Reads the page asynchronously --- the unlock_buffer() and
2062  * set/clear_buffer_uptodate() functions propagate buffer state into the
2063  * page struct once IO has completed.
2064  */
2065 int block_read_full_page(struct page *page, get_block_t *get_block)
2067 struct inode *inode = page->mapping->host;
2068 sector_t iblock, lblock;
2069 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2070 unsigned int blocksize;
2071 int nr, i;
2072 int fully_mapped = 1;
2074 if (!PageLocked(page))
2075 PAGE_BUG(page);
2076 if (PageUptodate(page))
2077 buffer_error();
2078 blocksize = 1 << inode->i_blkbits;
2079 if (!page_has_buffers(page))
2080 create_empty_buffers(page, blocksize, 0);
2081 head = page_buffers(page);
2083 iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2084 lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2085 bh = head;
2086 nr = 0;
2087 i = 0;
2089 do {
2090 if (buffer_uptodate(bh))
2091 continue;
2093 if (!buffer_mapped(bh)) {
2094 fully_mapped = 0;
2095 if (iblock < lblock) {
2096 if (get_block(inode, iblock, bh, 0))
2097 SetPageError(page);
2099 if (!buffer_mapped(bh)) {
2100 void *kaddr = kmap_atomic(page, KM_USER0);
2101 memset(kaddr + i * blocksize, 0, blocksize);
2102 flush_dcache_page(page);
2103 kunmap_atomic(kaddr, KM_USER0);
2104 set_buffer_uptodate(bh);
2105 continue;
2107                         /*
2108                          * get_block() might have updated the buffer
2109                          * synchronously
2110                          */
2111 if (buffer_uptodate(bh))
2112 continue;
2114 arr[nr++] = bh;
2115 } while (i++, iblock++, (bh = bh->b_this_page) != head);
2117 if (fully_mapped)
2118 SetPageMappedToDisk(page);
2120 if (!nr) {
2121                 /*
2122                  * All buffers are uptodate - we can set the page uptodate
2123                  * as well. But not if get_block() returned an error.
2124                  */
2125 if (!PageError(page))
2126 SetPageUptodate(page);
2127 unlock_page(page);
2128 return 0;
2131 /* Stage two: lock the buffers */
2132 for (i = 0; i < nr; i++) {
2133 bh = arr[i];
2134 lock_buffer(bh);
2135 mark_buffer_async_read(bh);
2138         /*
2139          * Stage 3: start the IO. Check for uptodateness
2140          * inside the buffer lock in case another process reading
2141          * the underlying blockdev brought it uptodate (the sct fix).
2142          */
2143 for (i = 0; i < nr; i++) {
2144 bh = arr[i];
2145 if (buffer_uptodate(bh))
2146 end_buffer_async_read(bh, 1);
2147 else
2148 submit_bh(READ, bh);
2150 return 0;
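/*
 * Editor's illustrative sketch (not part of the original file): a filesystem
 * with a conventional get_block callback typically implements ->readpage as
 * a one-line wrapper around block_read_full_page().  "foo_readpage" and
 * "foo_get_block" are hypothetical placeholder names.
 */
static int foo_readpage(struct file *file, struct page *page)
{
	/* Delegate all buffer mapping and read I/O to the helper above. */
	return block_read_full_page(page, foo_get_block);
}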
2153 /* utility function for filesystems that need to do work on expanding
2154 * truncates. Uses prepare/commit_write to allow the filesystem to
2155  * deal with the hole.
2156  */
2157 int generic_cont_expand(struct inode *inode, loff_t size)
2159 struct address_space *mapping = inode->i_mapping;
2160 struct page *page;
2161 unsigned long index, offset, limit;
2162 int err;
2164 err = -EFBIG;
2165 limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
2166 if (limit != RLIM_INFINITY && size > (loff_t)limit) {
2167 send_sig(SIGXFSZ, current, 0);
2168 goto out;
2170 if (size > inode->i_sb->s_maxbytes)
2171 goto out;
2173 offset = (size & (PAGE_CACHE_SIZE-1)); /* Within page */
2175 /* ugh. in prepare/commit_write, if from==to==start of block, we
2176 ** skip the prepare. make sure we never send an offset for the start
2177         ** of a block
2178         */
2179 if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
2180 offset++;
2182 index = size >> PAGE_CACHE_SHIFT;
2183 err = -ENOMEM;
2184 page = grab_cache_page(mapping, index);
2185 if (!page)
2186 goto out;
2187 err = mapping->a_ops->prepare_write(NULL, page, offset, offset);
2188 if (!err) {
2189 err = mapping->a_ops->commit_write(NULL, page, offset, offset);
2191 unlock_page(page);
2192 page_cache_release(page);
2193 if (err > 0)
2194 err = 0;
2195 out:
2196 return err;
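/*
 * Editor's illustrative sketch (not part of the original file): a filesystem
 * that cannot represent holes might call generic_cont_expand() when an
 * attribute change grows the file, so that the new tail is zero-filled via
 * prepare/commit_write.  "foo_expand" is a hypothetical placeholder.
 */
static int foo_expand(struct inode *inode, loff_t new_size)
{
	if (new_size <= i_size_read(inode))
		return 0;		/* nothing to zero-fill */
	return generic_cont_expand(inode, new_size);
}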
2199 /*
2200  * For moronic filesystems that do not allow holes in files.
2201  * We may have to extend the file.
2202  */
2204 int cont_prepare_write(struct page *page, unsigned offset,
2205 unsigned to, get_block_t *get_block, loff_t *bytes)
2207 struct address_space *mapping = page->mapping;
2208 struct inode *inode = mapping->host;
2209 struct page *new_page;
2210 unsigned long pgpos;
2211 long status;
2212 unsigned zerofrom;
2213 unsigned blocksize = 1 << inode->i_blkbits;
2214 void *kaddr;
2216 while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) {
2217 status = -ENOMEM;
2218 new_page = grab_cache_page(mapping, pgpos);
2219 if (!new_page)
2220 goto out;
2221 /* we might sleep */
2222 if (*bytes>>PAGE_CACHE_SHIFT != pgpos) {
2223 unlock_page(new_page);
2224 page_cache_release(new_page);
2225 continue;
2227 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2228 if (zerofrom & (blocksize-1)) {
2229 *bytes |= (blocksize-1);
2230 (*bytes)++;
2232 status = __block_prepare_write(inode, new_page, zerofrom,
2233 PAGE_CACHE_SIZE, get_block);
2234 if (status)
2235 goto out_unmap;
2236 kaddr = kmap_atomic(new_page, KM_USER0);
2237 memset(kaddr+zerofrom, 0, PAGE_CACHE_SIZE-zerofrom);
2238 flush_dcache_page(new_page);
2239 kunmap_atomic(kaddr, KM_USER0);
2240 __block_commit_write(inode, new_page,
2241 zerofrom, PAGE_CACHE_SIZE);
2242 unlock_page(new_page);
2243 page_cache_release(new_page);
2246 if (page->index < pgpos) {
2247 /* completely inside the area */
2248 zerofrom = offset;
2249 } else {
2250 /* page covers the boundary, find the boundary offset */
2251 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2253                 /* if we will expand the file, the last block will be filled */
2254 if (to > zerofrom && (zerofrom & (blocksize-1))) {
2255 *bytes |= (blocksize-1);
2256 (*bytes)++;
2259 /* starting below the boundary? Nothing to zero out */
2260 if (offset <= zerofrom)
2261 zerofrom = offset;
2263 status = __block_prepare_write(inode, page, zerofrom, to, get_block);
2264 if (status)
2265 goto out1;
2266 if (zerofrom < offset) {
2267 kaddr = kmap_atomic(page, KM_USER0);
2268 memset(kaddr+zerofrom, 0, offset-zerofrom);
2269 flush_dcache_page(page);
2270 kunmap_atomic(kaddr, KM_USER0);
2271 __block_commit_write(inode, page, zerofrom, offset);
2273 return 0;
2274 out1:
2275 ClearPageUptodate(page);
2276 return status;
2278 out_unmap:
2279 ClearPageUptodate(new_page);
2280 unlock_page(new_page);
2281 page_cache_release(new_page);
2282 out:
2283 return status;
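/*
 * Editor's illustrative sketch (not part of the original file): a hole-less
 * filesystem usually forwards ->prepare_write to cont_prepare_write(),
 * passing a pointer to its private "bytes allocated so far" counter so the
 * helper can zero-fill every page between the old end of data and the write
 * position.  "contfs_get_block", "CONTFS_I" and "i_allocated_bytes" are
 * hypothetical placeholders for filesystem-private code.
 */
static int contfs_prepare_write(struct file *file, struct page *page,
				unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;

	return cont_prepare_write(page, from, to, contfs_get_block,
				&CONTFS_I(inode)->i_allocated_bytes);
}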
2286 int block_prepare_write(struct page *page, unsigned from, unsigned to,
2287 get_block_t *get_block)
2289 struct inode *inode = page->mapping->host;
2290 int err = __block_prepare_write(inode, page, from, to, get_block);
2291 if (err)
2292 ClearPageUptodate(page);
2293 return err;
2296 int block_commit_write(struct page *page, unsigned from, unsigned to)
2298 struct inode *inode = page->mapping->host;
2299 __block_commit_write(inode,page,from,to);
2300 return 0;
2303 int generic_commit_write(struct file *file, struct page *page,
2304 unsigned from, unsigned to)
2306 struct inode *inode = page->mapping->host;
2307 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2308 __block_commit_write(inode,page,from,to);
2309         /*
2310          * No need to use i_size_read() here, the i_size
2311          * cannot change under us because we hold i_sem.
2312          */
2313 if (pos > inode->i_size) {
2314 i_size_write(inode, pos);
2315 mark_inode_dirty(inode);
2317 return 0;
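/*
 * Editor's illustrative sketch (not part of the original file): the common
 * pairing is a thin ->prepare_write wrapper around block_prepare_write()
 * plus generic_commit_write() used directly as ->commit_write.  The foo_*
 * names are hypothetical placeholders; the other methods of a real
 * address_space_operations (readpage, writepage, ...) are omitted here.
 */
static int foo_prepare_write(struct file *file, struct page *page,
				unsigned from, unsigned to)
{
	return block_prepare_write(page, from, to, foo_get_block);
}

static struct address_space_operations foo_aops = {
	.sync_page	= block_sync_page,
	.prepare_write	= foo_prepare_write,
	.commit_write	= generic_commit_write,
};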
2320 /*
2321  * On entry, the page is fully not uptodate.
2322  * On exit the page is fully uptodate in the areas outside (from,to)
2323  */
2324 int nobh_prepare_write(struct page *page, unsigned from, unsigned to,
2325 get_block_t *get_block)
2327 struct inode *inode = page->mapping->host;
2328 const unsigned blkbits = inode->i_blkbits;
2329 const unsigned blocksize = 1 << blkbits;
2330 struct buffer_head map_bh;
2331 struct buffer_head *read_bh[MAX_BUF_PER_PAGE];
2332 unsigned block_in_page;
2333 unsigned block_start;
2334 sector_t block_in_file;
2335 char *kaddr;
2336 int nr_reads = 0;
2337 int i;
2338 int ret = 0;
2339 int is_mapped_to_disk = 1;
2340 int dirtied_it = 0;
2342 if (PageMappedToDisk(page))
2343 return 0;
2345 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2346 map_bh.b_page = page;
2348         /*
2349          * We loop across all blocks in the page, whether or not they are
2350          * part of the affected region. This is so we can discover if the
2351          * page is fully mapped-to-disk.
2352          */
2353 for (block_start = 0, block_in_page = 0;
2354 block_start < PAGE_CACHE_SIZE;
2355 block_in_page++, block_start += blocksize) {
2356 unsigned block_end = block_start + blocksize;
2357 int create;
2359 map_bh.b_state = 0;
2360 create = 1;
2361 if (block_start >= to)
2362 create = 0;
2363 ret = get_block(inode, block_in_file + block_in_page,
2364 &map_bh, create);
2365 if (ret)
2366 goto failed;
2367 if (!buffer_mapped(&map_bh))
2368 is_mapped_to_disk = 0;
2369 if (buffer_new(&map_bh))
2370 unmap_underlying_metadata(map_bh.b_bdev,
2371 map_bh.b_blocknr);
2372 if (PageUptodate(page))
2373 continue;
2374 if (buffer_new(&map_bh) || !buffer_mapped(&map_bh)) {
2375 kaddr = kmap_atomic(page, KM_USER0);
2376 if (block_start < from) {
2377 memset(kaddr+block_start, 0, from-block_start);
2378 dirtied_it = 1;
2380 if (block_end > to) {
2381 memset(kaddr + to, 0, block_end - to);
2382 dirtied_it = 1;
2384 flush_dcache_page(page);
2385 kunmap_atomic(kaddr, KM_USER0);
2386 continue;
2388 if (buffer_uptodate(&map_bh))
2389 continue; /* reiserfs does this */
2390 if (block_start < from || block_end > to) {
2391 struct buffer_head *bh = alloc_buffer_head(GFP_NOFS);
2393 if (!bh) {
2394 ret = -ENOMEM;
2395 goto failed;
2397 bh->b_state = map_bh.b_state;
2398 atomic_set(&bh->b_count, 0);
2399 bh->b_this_page = 0;
2400 bh->b_page = page;
2401 bh->b_blocknr = map_bh.b_blocknr;
2402 bh->b_size = blocksize;
2403 bh->b_data = (char *)(long)block_start;
2404 bh->b_bdev = map_bh.b_bdev;
2405 bh->b_private = NULL;
2406 read_bh[nr_reads++] = bh;
2410 if (nr_reads) {
2411 ll_rw_block(READ, nr_reads, read_bh);
2412 for (i = 0; i < nr_reads; i++) {
2413 wait_on_buffer(read_bh[i]);
2414 if (!buffer_uptodate(read_bh[i]))
2415 ret = -EIO;
2416 free_buffer_head(read_bh[i]);
2417 read_bh[i] = NULL;
2419 if (ret)
2420 goto failed;
2423 if (is_mapped_to_disk)
2424 SetPageMappedToDisk(page);
2425 SetPageUptodate(page);
2427         /*
2428          * Setting the page dirty here isn't necessary for the prepare_write
2429          * function - commit_write will do that. But if/when this function is
2430          * used within the pagefault handler to ensure that all mmapped pages
2431          * have backing space in the filesystem, we will need to dirty the page
2432          * if its contents were altered.
2433          */
2434 if (dirtied_it)
2435 set_page_dirty(page);
2437 return 0;
2439 failed:
2440 for (i = 0; i < nr_reads; i++) {
2441 if (read_bh[i])
2442 free_buffer_head(read_bh[i]);
2445         /*
2446          * Error recovery is pretty slack. Clear the page and mark it dirty
2447          * so we'll later zero out any blocks which _were_ allocated.
2448          */
2449 kaddr = kmap_atomic(page, KM_USER0);
2450 memset(kaddr, 0, PAGE_CACHE_SIZE);
2451 kunmap_atomic(kaddr, KM_USER0);
2452 SetPageUptodate(page);
2453 set_page_dirty(page);
2454 return ret;
2456 EXPORT_SYMBOL(nobh_prepare_write);
2458 int nobh_commit_write(struct file *file, struct page *page,
2459 unsigned from, unsigned to)
2461 struct inode *inode = page->mapping->host;
2462 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2464 set_page_dirty(page);
2465 if (pos > inode->i_size) {
2466 i_size_write(inode, pos);
2467 mark_inode_dirty(inode);
2469 return 0;
2471 EXPORT_SYMBOL(nobh_commit_write);
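/*
 * Editor's illustrative sketch (not part of the original file): a filesystem
 * that wants to avoid attaching buffer_heads to its data pages can point its
 * methods at the nobh variants instead; nobh_truncate_page() below completes
 * the set for partial-block truncation.  foo_* names are placeholders.
 */
static int foo_nobh_prepare_write(struct file *file, struct page *page,
					unsigned from, unsigned to)
{
	return nobh_prepare_write(page, from, to, foo_get_block);
}
/* ->commit_write would then be nobh_commit_write itself. */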
2473 /*
2474  * This function assumes that ->prepare_write() uses nobh_prepare_write().
2475  */
2476 int nobh_truncate_page(struct address_space *mapping, loff_t from)
2478 struct inode *inode = mapping->host;
2479 unsigned blocksize = 1 << inode->i_blkbits;
2480 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2481 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2482 unsigned to;
2483 struct page *page;
2484 struct address_space_operations *a_ops = mapping->a_ops;
2485 char *kaddr;
2486 int ret = 0;
2488 if ((offset & (blocksize - 1)) == 0)
2489 goto out;
2491 ret = -ENOMEM;
2492 page = grab_cache_page(mapping, index);
2493 if (!page)
2494 goto out;
2496 to = (offset + blocksize) & ~(blocksize - 1);
2497 ret = a_ops->prepare_write(NULL, page, offset, to);
2498 if (ret == 0) {
2499 kaddr = kmap_atomic(page, KM_USER0);
2500 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2501 flush_dcache_page(page);
2502 kunmap_atomic(kaddr, KM_USER0);
2503 set_page_dirty(page);
2505 unlock_page(page);
2506 page_cache_release(page);
2507 out:
2508 return ret;
2510 EXPORT_SYMBOL(nobh_truncate_page);
2512 int block_truncate_page(struct address_space *mapping,
2513 loff_t from, get_block_t *get_block)
2515 unsigned long index = from >> PAGE_CACHE_SHIFT;
2516 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2517 unsigned blocksize, iblock, length, pos;
2518 struct inode *inode = mapping->host;
2519 struct page *page;
2520 struct buffer_head *bh;
2521 void *kaddr;
2522 int err;
2524 blocksize = 1 << inode->i_blkbits;
2525 length = offset & (blocksize - 1);
2527 /* Block boundary? Nothing to do */
2528 if (!length)
2529 return 0;
2531 length = blocksize - length;
2532 iblock = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2534 page = grab_cache_page(mapping, index);
2535 err = -ENOMEM;
2536 if (!page)
2537 goto out;
2539 if (!page_has_buffers(page))
2540 create_empty_buffers(page, blocksize, 0);
2542 /* Find the buffer that contains "offset" */
2543 bh = page_buffers(page);
2544 pos = blocksize;
2545 while (offset >= pos) {
2546 bh = bh->b_this_page;
2547 iblock++;
2548 pos += blocksize;
2551 err = 0;
2552 if (!buffer_mapped(bh)) {
2553 err = get_block(inode, iblock, bh, 0);
2554 if (err)
2555 goto unlock;
2556 /* unmapped? It's a hole - nothing to do */
2557 if (!buffer_mapped(bh))
2558 goto unlock;
2561 /* Ok, it's mapped. Make sure it's up-to-date */
2562 if (PageUptodate(page))
2563 set_buffer_uptodate(bh);
2565 if (!buffer_uptodate(bh) && !buffer_delay(bh)) {
2566 err = -EIO;
2567 ll_rw_block(READ, 1, &bh);
2568 wait_on_buffer(bh);
2569 /* Uhhuh. Read error. Complain and punt. */
2570 if (!buffer_uptodate(bh))
2571 goto unlock;
2574 kaddr = kmap_atomic(page, KM_USER0);
2575 memset(kaddr + offset, 0, length);
2576 flush_dcache_page(page);
2577 kunmap_atomic(kaddr, KM_USER0);
2579 mark_buffer_dirty(bh);
2580 err = 0;
2582 unlock:
2583 unlock_page(page);
2584 page_cache_release(page);
2585 out:
2586 return err;
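/*
 * Editor's illustrative sketch (not part of the original file): a typical
 * ->truncate path zeroes the tail of the final block with
 * block_truncate_page() before releasing the blocks beyond the new i_size.
 * "foo_truncate_blocks" and "foo_get_block" are hypothetical placeholders.
 */
static void foo_truncate(struct inode *inode)
{
	block_truncate_page(inode->i_mapping, inode->i_size, foo_get_block);
	foo_truncate_blocks(inode, inode->i_size);
}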
2589 /*
2590  * The generic ->writepage function for buffer-backed address_spaces
2591  */
2592 int block_write_full_page(struct page *page, get_block_t *get_block,
2593 struct writeback_control *wbc)
2595 struct inode * const inode = page->mapping->host;
2596 loff_t i_size = i_size_read(inode);
2597 const unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
2598 unsigned offset;
2599 void *kaddr;
2601 /* Is the page fully inside i_size? */
2602 if (page->index < end_index)
2603 return __block_write_full_page(inode, page, get_block, wbc);
2605 /* Is the page fully outside i_size? (truncate in progress) */
2606 offset = i_size & (PAGE_CACHE_SIZE-1);
2607 if (page->index >= end_index+1 || !offset) {
2608                 /*
2609                  * The page may have dirty, unmapped buffers. For example,
2610                  * they may have been added in ext3_writepage(). Make them
2611                  * freeable here, so the page does not leak.
2612                  */
2613 block_invalidatepage(page, 0);
2614 unlock_page(page);
2615 return 0; /* don't care */
2618         /*
2619          * The page straddles i_size. It must be zeroed out on each and every
2620          * writepage invocation because it may be mmapped. "A file is mapped
2621          * in multiples of the page size. For a file that is not a multiple of
2622          * the page size, the remaining memory is zeroed when mapped, and
2623          * writes to that region are not written out to the file."
2624          */
2625 kaddr = kmap_atomic(page, KM_USER0);
2626 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2627 flush_dcache_page(page);
2628 kunmap_atomic(kaddr, KM_USER0);
2629 return __block_write_full_page(inode, page, get_block, wbc);
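/*
 * Editor's illustrative sketch (not part of the original file): ->writepage
 * is usually just a wrapper that supplies the filesystem's get_block to
 * block_write_full_page().  foo_* names are hypothetical placeholders.
 */
static int foo_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, foo_get_block, wbc);
}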
2632 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2633 get_block_t *get_block)
2635 struct buffer_head tmp;
2636 struct inode *inode = mapping->host;
2637 tmp.b_state = 0;
2638 tmp.b_blocknr = 0;
2639 get_block(inode, block, &tmp, 0);
2640 return tmp.b_blocknr;
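/*
 * Editor's illustrative sketch (not part of the original file): ->bmap is
 * normally a one-line wrapper around generic_block_bmap().  foo_get_block
 * is a hypothetical placeholder.
 */
static sector_t foo_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, foo_get_block);
}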
2643 static int end_bio_bh_io_sync(struct bio *bio, unsigned int bytes_done, int err)
2645 struct buffer_head *bh = bio->bi_private;
2647 if (bio->bi_size)
2648 return 1;
2650 bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2651 bio_put(bio);
2652 return 0;
2655 int submit_bh(int rw, struct buffer_head * bh)
2657 struct bio *bio;
2659 BUG_ON(!buffer_locked(bh));
2660 BUG_ON(!buffer_mapped(bh));
2661 BUG_ON(!bh->b_end_io);
2663 if ((rw == READ || rw == READA) && buffer_uptodate(bh))
2664 buffer_error();
2665 if (rw == WRITE && !buffer_uptodate(bh))
2666 buffer_error();
2667 if (rw == READ && buffer_dirty(bh))
2668 buffer_error();
2670 /* Only clear out a write error when rewriting */
2671 if (test_set_buffer_req(bh) && rw == WRITE)
2672 clear_buffer_write_io_error(bh);
2674         /*
2675          * from here on down, it's all bio -- do the initial mapping,
2676          * submit_bio -> generic_make_request may further map this bio around
2677          */
2678 bio = bio_alloc(GFP_NOIO, 1);
2680 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2681 bio->bi_bdev = bh->b_bdev;
2682 bio->bi_io_vec[0].bv_page = bh->b_page;
2683 bio->bi_io_vec[0].bv_len = bh->b_size;
2684 bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2686 bio->bi_vcnt = 1;
2687 bio->bi_idx = 0;
2688 bio->bi_size = bh->b_size;
2690 bio->bi_end_io = end_bio_bh_io_sync;
2691 bio->bi_private = bh;
2693 return submit_bio(rw, bio);
2696 /**
2697  * ll_rw_block: low-level access to block devices (DEPRECATED)
2698 * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
2699 * @nr: number of &struct buffer_heads in the array
2700 * @bhs: array of pointers to &struct buffer_head
2702 * ll_rw_block() takes an array of pointers to &struct buffer_heads,
2703 * and requests an I/O operation on them, either a %READ or a %WRITE.
2704 * The third %READA option is described in the documentation for
2705 * generic_make_request() which ll_rw_block() calls.
2707 * This function drops any buffer that it cannot get a lock on (with the
2708 * BH_Lock state bit), any buffer that appears to be clean when doing a
2709  * write request, and any buffer that appears to be up-to-date when doing a
2710  * read request. Further, it marks as clean the buffers that are processed for
2711 * writing (the buffer cache won't assume that they are actually clean until
2712 * the buffer gets unlocked).
2714  * ll_rw_block sets b_end_io to a simple completion handler that marks
2715  * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
2716 * any waiters.
2718 * All of the buffers must be for the same device, and must also be a
2719  * multiple of the current approved size for the device.
2720  */
2721 void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2723 int i;
2725 for (i = 0; i < nr; i++) {
2726 struct buffer_head *bh = bhs[i];
2728 if (test_set_buffer_locked(bh))
2729 continue;
2731 get_bh(bh);
2732 if (rw == WRITE) {
2733 bh->b_end_io = end_buffer_write_sync;
2734 if (test_clear_buffer_dirty(bh)) {
2735 submit_bh(WRITE, bh);
2736 continue;
2738 } else {
2739 bh->b_end_io = end_buffer_read_sync;
2740 if (!buffer_uptodate(bh)) {
2741 submit_bh(rw, bh);
2742 continue;
2745 unlock_buffer(bh);
2746 put_bh(bh);
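/*
 * Editor's illustrative sketch (not part of the original file): the classic
 * synchronous read of a single metadata block via the deprecated
 * ll_rw_block() interface - get the buffer, submit the read, wait, check.
 * Assumes the usual __getblk()/brelse() buffer-cache helpers.
 */
static struct buffer_head *foo_read_block(struct block_device *bdev,
					sector_t block, int size)
{
	struct buffer_head *bh = __getblk(bdev, block, size);

	if (bh && !buffer_uptodate(bh)) {
		ll_rw_block(READ, 1, &bh);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh)) {
			brelse(bh);		/* read failed */
			bh = NULL;
		}
	}
	return bh;
}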
2750 /*
2751  * For a data-integrity writeout, we need to wait upon any in-progress I/O
2752  * and then start new I/O and then wait upon it.
2753  */
2754 void sync_dirty_buffer(struct buffer_head *bh)
2756 WARN_ON(atomic_read(&bh->b_count) < 1);
2757 lock_buffer(bh);
2758 if (test_clear_buffer_dirty(bh)) {
2759 get_bh(bh);
2760 bh->b_end_io = end_buffer_write_sync;
2761 submit_bh(WRITE, bh);
2762 wait_on_buffer(bh);
2763 } else {
2764 unlock_buffer(bh);
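/*
 * Editor's illustrative sketch (not part of the original file): dirtying and
 * synchronously writing one buffer (say, an on-disk superblock copy) with
 * the helper above.  "foo_sync_super" is a hypothetical placeholder.
 */
static int foo_sync_super(struct buffer_head *sb_bh)
{
	mark_buffer_dirty(sb_bh);
	sync_dirty_buffer(sb_bh);
	return buffer_uptodate(sb_bh) ? 0 : -EIO;
}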
2768 /*
2769  * Sanity checks for try_to_free_buffers.
2770  */
2771 static void check_ttfb_buffer(struct page *page, struct buffer_head *bh)
2773 if (!buffer_uptodate(bh) && !buffer_req(bh)) {
2774 if (PageUptodate(page) && page->mapping
2775 && buffer_mapped(bh) /* discard_buffer */
2776 && S_ISBLK(page->mapping->host->i_mode))
2778 buffer_error();
2783 /*
2784  * try_to_free_buffers() checks if all the buffers on this particular page
2785 * are unused, and releases them if so.
2787 * Exclusion against try_to_free_buffers may be obtained by either
2788 * locking the page or by holding its mapping's private_lock.
2790 * If the page is dirty but all the buffers are clean then we need to
2791 * be sure to mark the page clean as well. This is because the page
2792 * may be against a block device, and a later reattachment of buffers
2793 * to a dirty page will set *all* buffers dirty. Which would corrupt
2794 * filesystem data on the same device.
2796 * The same applies to regular filesystem pages: if all the buffers are
2797 * clean then we set the page clean and proceed. To do that, we require
2798 * total exclusion from __set_page_dirty_buffers(). That is obtained with
2799 * private_lock.
2801  * try_to_free_buffers() is non-blocking.
2802  */
2803 static inline int buffer_busy(struct buffer_head *bh)
2805 return atomic_read(&bh->b_count) |
2806 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
2809 static int
2810 drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
2812 struct buffer_head *head = page_buffers(page);
2813 struct buffer_head *bh;
2814 int was_uptodate = 1;
2816 bh = head;
2817 do {
2818 check_ttfb_buffer(page, bh);
2819 if (buffer_write_io_error(bh))
2820 set_bit(AS_EIO, &page->mapping->flags);
2821 if (buffer_busy(bh))
2822 goto failed;
2823 if (!buffer_uptodate(bh) && !buffer_req(bh))
2824 was_uptodate = 0;
2825 bh = bh->b_this_page;
2826 } while (bh != head);
2828 if (!was_uptodate && PageUptodate(page) && !PageError(page))
2829 buffer_error();
2831 do {
2832 struct buffer_head *next = bh->b_this_page;
2834 if (!list_empty(&bh->b_assoc_buffers))
2835 __remove_assoc_queue(bh);
2836 bh = next;
2837 } while (bh != head);
2838 *buffers_to_free = head;
2839 __clear_page_buffers(page);
2840 return 1;
2841 failed:
2842 return 0;
2845 int try_to_free_buffers(struct page *page)
2847 struct address_space * const mapping = page->mapping;
2848 struct buffer_head *buffers_to_free = NULL;
2849 int ret = 0;
2851 BUG_ON(!PageLocked(page));
2852 if (PageWriteback(page))
2853 return 0;
2855 if (mapping == NULL) { /* swapped-in anon page */
2856 ret = drop_buffers(page, &buffers_to_free);
2857 goto out;
2860 spin_lock(&mapping->private_lock);
2861 ret = drop_buffers(page, &buffers_to_free);
2862 if (ret && !PageSwapCache(page)) {
2863                 /*
2864                  * If the filesystem writes its buffers by hand (eg ext3)
2865                  * then we can have clean buffers against a dirty page. We
2866                  * clean the page here; otherwise later reattachment of buffers
2867                  * could encounter a non-uptodate page, which is unresolvable.
2868                  * This only applies in the rare case where try_to_free_buffers
2869                  * succeeds but the page is not freed.
2870                  */
2871 clear_page_dirty(page);
2873 spin_unlock(&mapping->private_lock);
2874 out:
2875 if (buffers_to_free) {
2876 struct buffer_head *bh = buffers_to_free;
2878 do {
2879 struct buffer_head *next = bh->b_this_page;
2880 free_buffer_head(bh);
2881 bh = next;
2882 } while (bh != buffers_to_free);
2884 return ret;
2886 EXPORT_SYMBOL(try_to_free_buffers);
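/*
 * Editor's illustrative sketch (not part of the original file): a filesystem
 * with no private per-page state can implement ->releasepage by handing the
 * page straight to try_to_free_buffers().  "foo_releasepage" is a placeholder.
 */
static int foo_releasepage(struct page *page, int gfp_mask)
{
	return try_to_free_buffers(page);
}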
2888 int block_sync_page(struct page *page)
2890 blk_run_queues();
2891 return 0;
2894 /*
2895  * There are no bdflush tunables left. But distributions are
2896  * still running obsolete flush daemons, so we terminate them here.
2897  *
2898  * Use of bdflush() is deprecated and will be removed in a future kernel.
2899  * The `pdflush' kernel threads fully replace bdflush daemons and this call.
2900  */
2901 asmlinkage long sys_bdflush(int func, long data)
2903 static int msg_count;
2905 if (!capable(CAP_SYS_ADMIN))
2906 return -EPERM;
2908 if (msg_count < 5) {
2909 msg_count++;
2910 printk(KERN_INFO
2911 "warning: process `%s' used the obsolete bdflush"
2912 " system call\n", current->comm);
2913 printk(KERN_INFO "Fix your initscripts?\n");
2916 if (func == 1)
2917 do_exit(0);
2918 return 0;
2921 /*
2922  * Buffer-head allocation
2923  */
2924 static kmem_cache_t *bh_cachep;
2926 /*
2927  * Once the number of bh's in the machine exceeds this level, we start
2928  * stripping them in writeback.
2929  */
2930 static int max_buffer_heads;
2932 int buffer_heads_over_limit;
2934 struct bh_accounting {
2935 int nr; /* Number of live bh's */
2936 int ratelimit; /* Limit cacheline bouncing */
2939 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
2941 static void recalc_bh_state(void)
2943 int i;
2944 int tot = 0;
2946 if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
2947 return;
2948 __get_cpu_var(bh_accounting).ratelimit = 0;
2949 for_each_cpu(i)
2950 tot += per_cpu(bh_accounting, i).nr;
2951 buffer_heads_over_limit = (tot > max_buffer_heads);
2954 struct buffer_head *alloc_buffer_head(int gfp_flags)
2956 struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
2957 if (ret) {
2958 preempt_disable();
2959 __get_cpu_var(bh_accounting).nr++;
2960 recalc_bh_state();
2961 preempt_enable();
2963 return ret;
2965 EXPORT_SYMBOL(alloc_buffer_head);
2967 void free_buffer_head(struct buffer_head *bh)
2969 BUG_ON(!list_empty(&bh->b_assoc_buffers));
2970 kmem_cache_free(bh_cachep, bh);
2971 preempt_disable();
2972 __get_cpu_var(bh_accounting).nr--;
2973 recalc_bh_state();
2974 preempt_enable();
2976 EXPORT_SYMBOL(free_buffer_head);
2978 static void
2979 init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long flags)
2981 if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
2982 SLAB_CTOR_CONSTRUCTOR) {
2983 struct buffer_head * bh = (struct buffer_head *)data;
2985 memset(bh, 0, sizeof(*bh));
2986 INIT_LIST_HEAD(&bh->b_assoc_buffers);
2990 static void buffer_init_cpu(int cpu)
2992 struct bh_accounting *bha = &per_cpu(bh_accounting, cpu);
2993 struct bh_lru *bhl = &per_cpu(bh_lrus, cpu);
2995 bha->nr = 0;
2996 bha->ratelimit = 0;
2997 memset(bhl, 0, sizeof(*bhl));
3000 static int __devinit buffer_cpu_notify(struct notifier_block *self,
3001 unsigned long action, void *hcpu)
3003 long cpu = (long)hcpu;
3004 switch(action) {
3005 case CPU_UP_PREPARE:
3006 buffer_init_cpu(cpu);
3007 break;
3008 default:
3009 break;
3011 return NOTIFY_OK;
3014 static struct notifier_block __devinitdata buffer_nb = {
3015 .notifier_call = buffer_cpu_notify,
3018 void __init buffer_init(void)
3020 int i;
3021 int nrpages;
3023 bh_cachep = kmem_cache_create("buffer_head",
3024 sizeof(struct buffer_head), 0,
3025 0, init_buffer_head, NULL);
3026 for (i = 0; i < ARRAY_SIZE(bh_wait_queue_heads); i++)
3027 init_waitqueue_head(&bh_wait_queue_heads[i].wqh);
3029         /*
3030          * Limit the bh occupancy to 10% of ZONE_NORMAL
3031          */
3032 nrpages = (nr_free_buffer_pages() * 10) / 100;
3033 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3034 buffer_cpu_notify(&buffer_nb, (unsigned long)CPU_UP_PREPARE,
3035 (void *)(long)smp_processor_id());
3036 register_cpu_notifier(&buffer_nb);
3039 EXPORT_SYMBOL(__bforget);
3040 EXPORT_SYMBOL(__brelse);
3041 EXPORT_SYMBOL(__wait_on_buffer);
3042 EXPORT_SYMBOL(block_commit_write);
3043 EXPORT_SYMBOL(block_prepare_write);
3044 EXPORT_SYMBOL(block_read_full_page);
3045 EXPORT_SYMBOL(block_sync_page);
3046 EXPORT_SYMBOL(block_truncate_page);
3047 EXPORT_SYMBOL(block_write_full_page);
3048 EXPORT_SYMBOL(buffer_insert_list);
3049 EXPORT_SYMBOL(cont_prepare_write);
3050 EXPORT_SYMBOL(end_buffer_async_write);
3051 EXPORT_SYMBOL(end_buffer_read_sync);
3052 EXPORT_SYMBOL(end_buffer_write_sync);
3053 EXPORT_SYMBOL(file_fsync);
3054 EXPORT_SYMBOL(fsync_bdev);
3055 EXPORT_SYMBOL(fsync_buffers_list);
3056 EXPORT_SYMBOL(generic_block_bmap);
3057 EXPORT_SYMBOL(generic_commit_write);
3058 EXPORT_SYMBOL(generic_cont_expand);
3059 EXPORT_SYMBOL(init_buffer);
3060 EXPORT_SYMBOL(invalidate_bdev);
3061 EXPORT_SYMBOL(ll_rw_block);
3062 EXPORT_SYMBOL(mark_buffer_dirty);
3063 EXPORT_SYMBOL(submit_bh);
3064 EXPORT_SYMBOL(sync_dirty_buffer);
3065 EXPORT_SYMBOL(unlock_buffer);