1 /*
2 * linux/fs/buffer.c
4 * Copyright (C) 1991, 1992, 2002 Linus Torvalds
5 */
7 /*
8 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
10 * Removed a lot of unnecessary code and simplified things now that
11 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
13 * Speed up hash, lru, and free list operations. Use gfp() for allocating
14 * hash table, use SLAB cache for buffer heads. SMP threading. -DaveM
16 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
18 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
21 #include <linux/config.h>
22 #include <linux/kernel.h>
23 #include <linux/fs.h>
24 #include <linux/mm.h>
25 #include <linux/percpu.h>
26 #include <linux/slab.h>
27 #include <linux/smp_lock.h>
28 #include <linux/blkdev.h>
29 #include <linux/file.h>
30 #include <linux/quotaops.h>
31 #include <linux/highmem.h>
32 #include <linux/module.h>
33 #include <linux/writeback.h>
34 #include <linux/hash.h>
35 #include <linux/suspend.h>
36 #include <linux/buffer_head.h>
37 #include <linux/bio.h>
38 #include <linux/notifier.h>
39 #include <linux/cpu.h>
40 #include <asm/bitops.h>
42 static void invalidate_bh_lrus(void);
44 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
47 * Hashed waitqueue_head's for wait_on_buffer()
49 #define BH_WAIT_TABLE_ORDER 7
50 static struct bh_wait_queue_head {
51 wait_queue_head_t wqh;
52 } ____cacheline_aligned_in_smp bh_wait_queue_heads[1<<BH_WAIT_TABLE_ORDER];
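/*
 * With BH_WAIT_TABLE_ORDER == 7 this is an array of 1 << 7 == 128
 * cacheline-aligned waitqueue heads.  bh_waitq_head() below hashes the
 * buffer_head pointer into the table, so unrelated buffers may share a
 * waitqueue and wake_up_buffer() wakes every waiter hashed to that slot.
 */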
55 * Debug/devel support stuff
58 void __buffer_error(char *file, int line)
60 static int enough;
62 if (enough > 10)
63 return;
64 enough++;
65 printk("buffer layer error at %s:%d\n", file, line);
66 #ifndef CONFIG_KALLSYMS
67 printk("Pass this trace through ksymoops for reporting\n");
68 #endif
69 dump_stack();
71 EXPORT_SYMBOL(__buffer_error);
73 inline void
74 init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
76 bh->b_end_io = handler;
77 bh->b_private = private;
81 * Return the address of the waitqueue_head to be used for this
82 * buffer_head
84 wait_queue_head_t *bh_waitq_head(struct buffer_head *bh)
86 return &bh_wait_queue_heads[hash_ptr(bh, BH_WAIT_TABLE_ORDER)].wqh;
88 EXPORT_SYMBOL(bh_waitq_head);
90 void wake_up_buffer(struct buffer_head *bh)
92 wait_queue_head_t *wq = bh_waitq_head(bh);
94 smp_mb();
95 if (waitqueue_active(wq))
96 wake_up_all(wq);
98 EXPORT_SYMBOL(wake_up_buffer);
100 void unlock_buffer(struct buffer_head *bh)
103 * unlock_buffer against a zero-count bh is a bug, if the page
104 * is not locked. Because then nothing protects the buffer's
105 * waitqueue, which is used here. (Well. Other locked buffers
106 * against the page will pin it. But complain anyway).
108 if (atomic_read(&bh->b_count) == 0 &&
109 !PageLocked(bh->b_page) &&
110 !PageWriteback(bh->b_page))
111 buffer_error();
113 clear_buffer_locked(bh);
114 smp_mb__after_clear_bit();
115 wake_up_buffer(bh);
119 * Block until a buffer comes unlocked. This doesn't stop it
120 * from becoming locked again - you have to lock it yourself
121 * if you want to preserve its state.
123 void __wait_on_buffer(struct buffer_head * bh)
125 wait_queue_head_t *wqh = bh_waitq_head(bh);
126 DEFINE_WAIT(wait);
128 if (atomic_read(&bh->b_count) == 0 &&
129 (!bh->b_page || !PageLocked(bh->b_page)))
130 buffer_error();
132 do {
133 prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
134 if (buffer_locked(bh)) {
135 blk_run_queues();
136 io_schedule();
138 } while (buffer_locked(bh));
139 finish_wait(wqh, &wait);
142 static void
143 __set_page_buffers(struct page *page, struct buffer_head *head)
145 if (page_has_buffers(page))
146 buffer_error();
147 page_cache_get(page);
148 SetPagePrivate(page);
149 page->private = (unsigned long)head;
152 static void
153 __clear_page_buffers(struct page *page)
155 ClearPagePrivate(page);
156 page->private = 0;
157 page_cache_release(page);
160 static void buffer_io_error(struct buffer_head *bh)
162 char b[BDEVNAME_SIZE];
164 printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
165 bdevname(bh->b_bdev, b),
166 (unsigned long long)bh->b_blocknr);
170 * Default synchronous end-of-IO handler.. Just mark it up-to-date and
171 * unlock the buffer. This is what ll_rw_block uses too.
173 void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
175 if (uptodate) {
176 set_buffer_uptodate(bh);
177 } else {
178 /* This happens, due to failed READA attempts. */
179 clear_buffer_uptodate(bh);
181 unlock_buffer(bh);
182 put_bh(bh);
185 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
187 char b[BDEVNAME_SIZE];
189 if (uptodate) {
190 set_buffer_uptodate(bh);
191 } else {
192 buffer_io_error(bh);
193 printk(KERN_WARNING "lost page write due to I/O error on %s\n",
194 bdevname(bh->b_bdev, b));
195 set_buffer_write_io_error(bh);
196 clear_buffer_uptodate(bh);
198 unlock_buffer(bh);
199 put_bh(bh);
203 * Write out and wait upon all the dirty data associated with a block
204 * device via its mapping. Does not take the superblock lock.
206 int sync_blockdev(struct block_device *bdev)
208 int ret = 0;
210 if (bdev) {
211 int err;
213 ret = filemap_fdatawrite(bdev->bd_inode->i_mapping);
214 err = filemap_fdatawait(bdev->bd_inode->i_mapping);
215 if (!ret)
216 ret = err;
218 return ret;
220 EXPORT_SYMBOL(sync_blockdev);
223 * Write out and wait upon all dirty data associated with this
224 * superblock. Filesystem data as well as the underlying block
225 * device. Takes the superblock lock.
227 int fsync_super(struct super_block *sb)
229 sync_inodes_sb(sb, 0);
230 DQUOT_SYNC(sb);
231 lock_super(sb);
232 if (sb->s_dirt && sb->s_op->write_super)
233 sb->s_op->write_super(sb);
234 unlock_super(sb);
235 if (sb->s_op->sync_fs)
236 sb->s_op->sync_fs(sb, 1);
237 sync_blockdev(sb->s_bdev);
238 sync_inodes_sb(sb, 1);
240 return sync_blockdev(sb->s_bdev);
244 * Write out and wait upon all dirty data associated with this
245 * device. Filesystem data as well as the underlying block
246 * device. Takes the superblock lock.
248 int fsync_bdev(struct block_device *bdev)
250 struct super_block *sb = get_super(bdev);
251 if (sb) {
252 int res = fsync_super(sb);
253 drop_super(sb);
254 return res;
256 return sync_blockdev(bdev);
260 * sync everything. Start out by waking pdflush, because that writes back
261 * all queues in parallel.
263 static void do_sync(unsigned long wait)
265 wakeup_bdflush(0);
266 sync_inodes(0); /* All mappings, inodes and their blockdevs */
267 DQUOT_SYNC(NULL);
268 sync_supers(); /* Write the superblocks */
269 sync_filesystems(0); /* Start syncing the filesystems */
270 sync_filesystems(wait); /* Waitingly sync the filesystems */
271 sync_inodes(wait); /* Mappings, inodes and blockdevs, again. */
272 if (!wait)
273 printk("Emergency Sync complete\n");
276 asmlinkage long sys_sync(void)
278 do_sync(1);
279 return 0;
282 void emergency_sync(void)
284 pdflush_operation(do_sync, 0);
288 * Generic function to fsync a file.
290 * filp may be NULL if called via the msync of a vma.
293 int file_fsync(struct file *filp, struct dentry *dentry, int datasync)
295 struct inode * inode = dentry->d_inode;
296 struct super_block * sb;
297 int ret;
299 /* sync the inode to buffers */
300 write_inode_now(inode, 0);
302 /* sync the superblock to buffers */
303 sb = inode->i_sb;
304 lock_super(sb);
305 if (sb->s_op->write_super)
306 sb->s_op->write_super(sb);
307 unlock_super(sb);
309 /* .. finally sync the buffers to disk */
310 ret = sync_blockdev(sb->s_bdev);
311 return ret;
314 asmlinkage long sys_fsync(unsigned int fd)
316 struct file * file;
317 struct dentry * dentry;
318 struct inode * inode;
319 int ret, err;
321 ret = -EBADF;
322 file = fget(fd);
323 if (!file)
324 goto out;
326 dentry = file->f_dentry;
327 inode = dentry->d_inode;
329 ret = -EINVAL;
330 if (!file->f_op || !file->f_op->fsync) {
331 /* Why? We can still call filemap_fdatawrite */
332 goto out_putf;
335 /* We need to protect against concurrent writers.. */
336 down(&inode->i_sem);
337 current->flags |= PF_SYNCWRITE;
338 ret = filemap_fdatawrite(inode->i_mapping);
339 err = file->f_op->fsync(file, dentry, 0);
340 if (!ret)
341 ret = err;
342 err = filemap_fdatawait(inode->i_mapping);
343 if (!ret)
344 ret = err;
345 current->flags &= ~PF_SYNCWRITE;
346 up(&inode->i_sem);
348 out_putf:
349 fput(file);
350 out:
351 return ret;
354 asmlinkage long sys_fdatasync(unsigned int fd)
356 struct file * file;
357 struct dentry * dentry;
358 struct inode * inode;
359 int ret, err;
361 ret = -EBADF;
362 file = fget(fd);
363 if (!file)
364 goto out;
366 dentry = file->f_dentry;
367 inode = dentry->d_inode;
369 ret = -EINVAL;
370 if (!file->f_op || !file->f_op->fsync)
371 goto out_putf;
373 down(&inode->i_sem);
374 current->flags |= PF_SYNCWRITE;
375 ret = filemap_fdatawrite(inode->i_mapping);
376 err = file->f_op->fsync(file, dentry, 1);
377 if (!ret)
378 ret = err;
379 err = filemap_fdatawait(inode->i_mapping);
380 if (!ret)
381 ret = err;
382 current->flags &= ~PF_SYNCWRITE;
383 up(&inode->i_sem);
385 out_putf:
386 fput(file);
387 out:
388 return ret;
392 * Various filesystems appear to want __find_get_block to be non-blocking.
393 * But it's the page lock which protects the buffers. To get around this,
394 * we get exclusion from try_to_free_buffers with the blockdev mapping's
395 * private_lock.
397 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
398 * may be quite high. This code could TryLock the page, and if that
399 * succeeds, there is no need to take private_lock. (But if
400 * private_lock is contended then so is mapping->page_lock).
402 static struct buffer_head *
403 __find_get_block_slow(struct block_device *bdev, sector_t block, int unused)
405 struct inode *bd_inode = bdev->bd_inode;
406 struct address_space *bd_mapping = bd_inode->i_mapping;
407 struct buffer_head *ret = NULL;
408 unsigned long index;
409 struct buffer_head *bh;
410 struct buffer_head *head;
411 struct page *page;
413 index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
414 page = find_get_page(bd_mapping, index);
415 if (!page)
416 goto out;
418 spin_lock(&bd_mapping->private_lock);
419 if (!page_has_buffers(page))
420 goto out_unlock;
421 head = page_buffers(page);
422 bh = head;
423 do {
424 if (bh->b_blocknr == block) {
425 ret = bh;
426 get_bh(bh);
427 goto out_unlock;
429 bh = bh->b_this_page;
430 } while (bh != head);
431 buffer_error();
432 printk("block=%llu, b_blocknr=%llu\n",
433 (unsigned long long)block, (unsigned long long)bh->b_blocknr);
434 printk("b_state=0x%08lx, b_size=%u\n", bh->b_state, bh->b_size);
435 out_unlock:
436 spin_unlock(&bd_mapping->private_lock);
437 page_cache_release(page);
438 out:
439 return ret;
442 /* If invalidate_buffers() will trash dirty buffers, it means some kind
443 of fs corruption is going on. Trashing dirty data always implies losing
444 information that was supposed to be just stored on the physical layer
445 by the user.
447 Thus invalidate_buffers in general usage is not allowed to trash
448 dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
449 be preserved. These buffers are simply skipped.
451 We also skip buffers which are still in use. For example this can
452 happen if a userspace program is reading the block device.
454 NOTE: In the case where the user removes a removable-media disk even if
455 there's still dirty data not synced on disk (due to a bug in the device driver
456 or to an error by the user), by not destroying the dirty buffers we could
457 also generate corruption on the next media inserted; thus a parameter is
458 necessary to handle this case in the safest way possible (trying
459 not to corrupt the new disk inserted with data belonging to
460 the old, now corrupted, disk). Also for the ramdisk the natural thing
461 to do in order to release the ramdisk memory is to destroy dirty buffers.
463 These are two special cases. Normal usage implies that the device driver
464 issues a sync on the device (without waiting for I/O completion) and
465 then an invalidate_buffers call that doesn't trash dirty buffers.
467 For handling cache coherency with the blkdev pagecache the 'update' case
468 has been introduced. It is needed to re-read from disk any pinned
469 buffer. NOTE: re-reading from disk is destructive so we can do it only
470 when we assume nobody is changing the buffercache under our I/O and when
471 we think the disk contains more recent information than the buffercache.
472 The update == 1 pass marks the buffers we need to update, the update == 2
473 pass does the actual I/O. */
474 void invalidate_bdev(struct block_device *bdev, int destroy_dirty_buffers)
476 invalidate_bh_lrus();
478 * FIXME: what about destroy_dirty_buffers?
479 * We really want to use invalidate_inode_pages2() for
480 * that, but not until that's cleaned up.
482 invalidate_inode_pages(bdev->bd_inode->i_mapping);
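/*
 * A minimal sketch (not part of the original file) of the "normal usage"
 * described above: a hypothetical driver handling a media change first syncs
 * the device and then invalidates the buffer cache without destroying dirty
 * buffers.  Note that sync_blockdev() also waits for the I/O, which is
 * stricter than the comment above requires.
 */
static void example_revalidate_media(struct block_device *bdev)
{
	sync_blockdev(bdev);		/* write back dirty pagecache data */
	invalidate_bdev(bdev, 0);	/* drop clean, unused buffers only */
}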
486 * Kick pdflush then try to free up some ZONE_NORMAL memory.
488 static void free_more_memory(void)
490 struct zone *zone;
491 pg_data_t *pgdat;
493 wakeup_bdflush(1024);
494 blk_run_queues();
495 yield();
497 for_each_pgdat(pgdat) {
498 zone = pgdat->node_zonelists[GFP_NOFS&GFP_ZONEMASK].zones[0];
499 if (zone)
500 try_to_free_pages(zone, GFP_NOFS, 0);
505 * I/O completion handler for block_read_full_page() - pages
506 * which come unlocked at the end of I/O.
508 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
510 static spinlock_t page_uptodate_lock = SPIN_LOCK_UNLOCKED;
511 unsigned long flags;
512 struct buffer_head *tmp;
513 struct page *page;
514 int page_uptodate = 1;
516 BUG_ON(!buffer_async_read(bh));
518 page = bh->b_page;
519 if (uptodate) {
520 set_buffer_uptodate(bh);
521 } else {
522 clear_buffer_uptodate(bh);
523 buffer_io_error(bh);
524 SetPageError(page);
528 * Be _very_ careful from here on. Bad things can happen if
529 * two buffer heads end IO at almost the same time and both
530 * decide that the page is now completely done.
532 spin_lock_irqsave(&page_uptodate_lock, flags);
533 clear_buffer_async_read(bh);
534 unlock_buffer(bh);
535 tmp = bh;
536 do {
537 if (!buffer_uptodate(tmp))
538 page_uptodate = 0;
539 if (buffer_async_read(tmp)) {
540 BUG_ON(!buffer_locked(tmp));
541 goto still_busy;
543 tmp = tmp->b_this_page;
544 } while (tmp != bh);
545 spin_unlock_irqrestore(&page_uptodate_lock, flags);
548 * If none of the buffers had errors and they are all
549 * uptodate then we can set the page uptodate.
551 if (page_uptodate && !PageError(page))
552 SetPageUptodate(page);
553 unlock_page(page);
554 return;
556 still_busy:
557 spin_unlock_irqrestore(&page_uptodate_lock, flags);
558 return;
562 * Completion handler for block_write_full_page() - pages which are unlocked
563 * during I/O, and which have PageWriteback cleared upon I/O completion.
565 void end_buffer_async_write(struct buffer_head *bh, int uptodate)
567 char b[BDEVNAME_SIZE];
568 static spinlock_t page_uptodate_lock = SPIN_LOCK_UNLOCKED;
569 unsigned long flags;
570 struct buffer_head *tmp;
571 struct page *page;
573 BUG_ON(!buffer_async_write(bh));
575 page = bh->b_page;
576 if (uptodate) {
577 set_buffer_uptodate(bh);
578 } else {
579 buffer_io_error(bh);
580 printk(KERN_WARNING "lost page write due to I/O error on %s\n",
581 bdevname(bh->b_bdev, b));
582 set_bit(AS_EIO, &page->mapping->flags);
583 clear_buffer_uptodate(bh);
584 SetPageError(page);
587 spin_lock_irqsave(&page_uptodate_lock, flags);
588 clear_buffer_async_write(bh);
589 unlock_buffer(bh);
590 tmp = bh->b_this_page;
591 while (tmp != bh) {
592 if (buffer_async_write(tmp)) {
593 BUG_ON(!buffer_locked(tmp));
594 goto still_busy;
596 tmp = tmp->b_this_page;
598 spin_unlock_irqrestore(&page_uptodate_lock, flags);
599 end_page_writeback(page);
600 return;
602 still_busy:
603 spin_unlock_irqrestore(&page_uptodate_lock, flags);
604 return;
608 * If a page's buffers are under async read (end_buffer_async_read
609 * completion) then there is a possibility that another thread of
610 * control could lock one of the buffers after it has completed
611 * but while some of the other buffers have not completed. This
612 * locked buffer would confuse end_buffer_async_read() into not unlocking
613 * the page. So the absence of BH_Async_Read tells end_buffer_async_read()
614 * that this buffer is not under async I/O.
616 * The page comes unlocked when it has no locked buffer_async buffers
617 * left.
619 * PageLocked prevents anyone from starting new async I/O against any of
620 * the buffers.
622 * PageWriteback is used to prevent simultaneous writeout of the same
623 * page.
625 * PageLocked prevents anyone from starting writeback of a page which is
626 * under read I/O (PageWriteback is only ever set against a locked page).
628 void mark_buffer_async_read(struct buffer_head *bh)
630 bh->b_end_io = end_buffer_async_read;
631 set_buffer_async_read(bh);
633 EXPORT_SYMBOL(mark_buffer_async_read);
635 void mark_buffer_async_write(struct buffer_head *bh)
637 bh->b_end_io = end_buffer_async_write;
638 set_buffer_async_write(bh);
640 EXPORT_SYMBOL(mark_buffer_async_write);
644 * fs/buffer.c contains helper functions for buffer-backed address space's
645 * fsync functions. A common requirement for buffer-based filesystems is
646 * that certain data from the backing blockdev needs to be written out for
647 * a successful fsync(). For example, ext2 indirect blocks need to be
648 * written back and waited upon before fsync() returns.
650 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
651 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
652 * management of a list of dependent buffers at ->i_mapping->private_list.
654 * Locking is a little subtle: try_to_free_buffers() will remove buffers
655 * from their controlling inode's queue when they are being freed. But
656 * try_to_free_buffers() will be operating against the *blockdev* mapping
657 * at the time, not against the S_ISREG file which depends on those buffers.
658 * So the locking for private_list is via the private_lock in the address_space
659 * which backs the buffers. Which is different from the address_space
660 * against which the buffers are listed. So for a particular address_space,
661 * mapping->private_lock does *not* protect mapping->private_list! In fact,
662 * mapping->private_list will always be protected by the backing blockdev's
663 * ->private_lock.
665 * Which introduces a requirement: all buffers on an address_space's
666 * ->private_list must be from the same address_space: the blockdev's.
668 * address_spaces which do not place buffers at ->private_list via these
669 * utility functions are free to use private_lock and private_list for
670 * whatever they want. The only requirement is that list_empty(private_list)
671 * be true at clear_inode() time.
673 * FIXME: clear_inode should not call invalidate_inode_buffers(). The
674 * filesystems should do that. invalidate_inode_buffers() should just go
675 * BUG_ON(!list_empty).
677 * FIXME: mark_buffer_dirty_inode() is a data-plane operation. It should
678 * take an address_space, not an inode. And it should be called
679 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
680 * queued up.
682 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
683 * list if it is already on a list. Because if the buffer is on a list,
684 * it *must* already be on the right one. If not, the filesystem is being
685 * silly. This will save a ton of locking. But first we have to ensure
686 * that buffers are taken *off* the old inode's list when they are freed
687 * (presumably in truncate). That requires careful auditing of all
688 * filesystems (do it inside bforget()). It could also be done by bringing
689 * b_inode back.
692 void buffer_insert_list(spinlock_t *lock,
693 struct buffer_head *bh, struct list_head *list)
695 spin_lock(lock);
696 list_move_tail(&bh->b_assoc_buffers, list);
697 spin_unlock(lock);
701 * The buffer's backing address_space's private_lock must be held
703 static inline void __remove_assoc_queue(struct buffer_head *bh)
705 list_del_init(&bh->b_assoc_buffers);
708 int inode_has_buffers(struct inode *inode)
710 return !list_empty(&inode->i_data.private_list);
714 * osync is designed to support O_SYNC io. It waits synchronously for
715 * all already-submitted IO to complete, but does not queue any new
716 * writes to the disk.
718 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
719 * you dirty the buffers, and then use osync_inode_buffers to wait for
720 * completion. Any other dirty buffers which are not yet queued for
721 * write will not be flushed to disk by the osync.
723 static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
725 struct buffer_head *bh;
726 struct list_head *p;
727 int err = 0;
729 spin_lock(lock);
730 repeat:
731 list_for_each_prev(p, list) {
732 bh = BH_ENTRY(p);
733 if (buffer_locked(bh)) {
734 get_bh(bh);
735 spin_unlock(lock);
736 wait_on_buffer(bh);
737 if (!buffer_uptodate(bh))
738 err = -EIO;
739 brelse(bh);
740 spin_lock(lock);
741 goto repeat;
744 spin_unlock(lock);
745 return err;
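/*
 * A rough sketch (illustrative only) of the O_SYNC pattern described above,
 * reduced to a single buffer: the write is queued with ll_rw_block() when the
 * buffer is dirtied, and completion is waited for afterwards.  Real callers
 * batch this and wait via the list-based helpers.
 */
static int example_osync_one_buffer(struct buffer_head *bh)
{
	mark_buffer_dirty(bh);
	ll_rw_block(WRITE, 1, &bh);	/* queue the write now */
	wait_on_buffer(bh);		/* wait for the submitted I/O only */
	return buffer_uptodate(bh) ? 0 : -EIO;
}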
749 * sync_mapping_buffers - write out and wait upon a mapping's "associated"
750 * buffers
751 * @buffer_mapping - the mapping which backs the buffers' data
752 * @mapping - the mapping which wants those buffers written
754 * Starts I/O against the buffers at mapping->private_list, and waits upon
755 * that I/O.
757 * Basically, this is a convenience function for fsync(). @buffer_mapping is
758 * the blockdev which "owns" the buffers and @mapping is a file or directory
759 * which needs those buffers to be written for a successful fsync().
761 int sync_mapping_buffers(struct address_space *mapping)
763 struct address_space *buffer_mapping = mapping->assoc_mapping;
765 if (buffer_mapping == NULL || list_empty(&mapping->private_list))
766 return 0;
768 return fsync_buffers_list(&buffer_mapping->private_lock,
769 &mapping->private_list);
771 EXPORT_SYMBOL(sync_mapping_buffers);
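/*
 * A minimal sketch, for a hypothetical filesystem, of how these helpers are
 * meant to be used: metadata buffers (e.g. indirect blocks) are queued on the
 * inode's mapping with mark_buffer_dirty_inode() at modification time, and
 * the filesystem's ->fsync() later writes and waits on that list via
 * sync_mapping_buffers().  The data pages themselves are written by the
 * sys_fsync() caller.
 */
static int examplefs_fsync(struct file *file, struct dentry *dentry,
			   int datasync)
{
	struct inode *inode = dentry->d_inode;

	return sync_mapping_buffers(inode->i_mapping);
}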
774 * Called when we've recently written block `bblock', and it is known that
775 * `bblock' was for a buffer_boundary() buffer. This means that the block at
776 * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's
777 * dirty, schedule it for IO. So that indirects merge nicely with their data.
779 void write_boundary_block(struct block_device *bdev,
780 sector_t bblock, unsigned blocksize)
782 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
783 if (bh) {
784 if (buffer_dirty(bh))
785 ll_rw_block(WRITE, 1, &bh);
786 put_bh(bh);
790 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
792 struct address_space *mapping = inode->i_mapping;
793 struct address_space *buffer_mapping = bh->b_page->mapping;
795 mark_buffer_dirty(bh);
796 if (!mapping->assoc_mapping) {
797 mapping->assoc_mapping = buffer_mapping;
798 } else {
799 if (mapping->assoc_mapping != buffer_mapping)
800 BUG();
802 if (list_empty(&bh->b_assoc_buffers))
803 buffer_insert_list(&buffer_mapping->private_lock,
804 bh, &mapping->private_list);
806 EXPORT_SYMBOL(mark_buffer_dirty_inode);
809 * Add a page to the dirty page list.
811 * It is a sad fact of life that this function is called from several places
812 * deeply under spinlocking. It may not sleep.
814 * If the page has buffers, the uptodate buffers are set dirty, to preserve
815 * dirty-state coherency between the page and the buffers. If the page does
816 * not have buffers then when they are later attached they will all be set
817 * dirty.
819 * The buffers are dirtied before the page is dirtied. There's a small race
820 * window in which a writepage caller may see the page cleanness but not the
821 * buffer dirtiness. That's fine. If this code were to set the page dirty
822 * before the buffers, a concurrent writepage caller could clear the page dirty
823 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
824 * page on the dirty page list.
826 * There is also a small window where the page is dirty, and not on dirty_pages.
827 * Also a possibility that by the time the page is added to dirty_pages, it has
828 * been set clean. The page lists are somewhat approximate in this regard.
829 * It's better to have clean pages accidentally attached to dirty_pages than to
830 * leave dirty pages attached to clean_pages.
832 * We use private_lock to lock against try_to_free_buffers while using the
833 * page's buffer list. Also use this to protect against clean buffers being
834 * added to the page after it was set dirty.
836 * FIXME: may need to call ->reservepage here as well. That's rather up to the
837 * address_space though.
839 * For now, we treat swapper_space specially. It doesn't use the normal
840 * block a_ops.
842 int __set_page_dirty_buffers(struct page *page)
844 struct address_space * const mapping = page->mapping;
845 int ret = 0;
847 if (mapping == NULL) {
848 SetPageDirty(page);
849 goto out;
852 spin_lock(&mapping->private_lock);
853 if (page_has_buffers(page)) {
854 struct buffer_head *head = page_buffers(page);
855 struct buffer_head *bh = head;
857 do {
858 if (buffer_uptodate(bh))
859 set_buffer_dirty(bh);
860 else
861 buffer_error();
862 bh = bh->b_this_page;
863 } while (bh != head);
865 spin_unlock(&mapping->private_lock);
867 if (!TestSetPageDirty(page)) {
868 spin_lock(&mapping->page_lock);
869 if (page->mapping) { /* Race with truncate? */
870 if (!mapping->backing_dev_info->memory_backed)
871 inc_page_state(nr_dirty);
872 list_del(&page->list);
873 list_add(&page->list, &mapping->dirty_pages);
875 spin_unlock(&mapping->page_lock);
876 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
879 out:
880 return ret;
882 EXPORT_SYMBOL(__set_page_dirty_buffers);
885 * Write out and wait upon a list of buffers.
887 * We have conflicting pressures: we want to make sure that all
888 * initially dirty buffers get waited on, but that any subsequently
889 * dirtied buffers don't. After all, we don't want fsync to last
890 * forever if somebody is actively writing to the file.
892 * Do this in two main stages: first we copy dirty buffers to a
893 * temporary inode list, queueing the writes as we go. Then we clean
894 * up, waiting for those writes to complete.
896 * During this second stage, any subsequent updates to the file may end
897 * up refiling the buffer on the original inode's dirty list again, so
898 * there is a chance we will end up with a buffer queued for write but
899 * not yet completed on that list. So, as a final cleanup we go through
900 * the osync code to catch these locked, dirty buffers without requeuing
901 * any newly dirty buffers for write.
903 int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
905 struct buffer_head *bh;
906 struct list_head tmp;
907 int err = 0, err2;
909 INIT_LIST_HEAD(&tmp);
911 spin_lock(lock);
912 while (!list_empty(list)) {
913 bh = BH_ENTRY(list->next);
914 list_del_init(&bh->b_assoc_buffers);
915 if (buffer_dirty(bh) || buffer_locked(bh)) {
916 list_add(&bh->b_assoc_buffers, &tmp);
917 if (buffer_dirty(bh)) {
918 get_bh(bh);
919 spin_unlock(lock);
921 * Ensure any pending I/O completes so that
922 * ll_rw_block() actually writes the current
923 * contents - it is a noop if I/O is still in
924 * flight on potentially older contents.
926 wait_on_buffer(bh);
927 ll_rw_block(WRITE, 1, &bh);
928 brelse(bh);
929 spin_lock(lock);
934 while (!list_empty(&tmp)) {
935 bh = BH_ENTRY(tmp.prev);
936 __remove_assoc_queue(bh);
937 get_bh(bh);
938 spin_unlock(lock);
939 wait_on_buffer(bh);
940 if (!buffer_uptodate(bh))
941 err = -EIO;
942 brelse(bh);
943 spin_lock(lock);
946 spin_unlock(lock);
947 err2 = osync_buffers_list(lock, list);
948 if (err)
949 return err;
950 else
951 return err2;
955 * Invalidate any and all dirty buffers on a given inode. We are
956 * probably unmounting the fs, but that doesn't mean we have already
957 * done a sync(). Just drop the buffers from the inode list.
959 * NOTE: we take the inode's blockdev's mapping's private_lock. Which
960 * assumes that all the buffers are against the blockdev. Not true
961 * for reiserfs.
963 void invalidate_inode_buffers(struct inode *inode)
965 if (inode_has_buffers(inode)) {
966 struct address_space *mapping = &inode->i_data;
967 struct list_head *list = &mapping->private_list;
968 struct address_space *buffer_mapping = mapping->assoc_mapping;
970 spin_lock(&buffer_mapping->private_lock);
971 while (!list_empty(list))
972 __remove_assoc_queue(BH_ENTRY(list->next));
973 spin_unlock(&buffer_mapping->private_lock);
978 * Remove any clean buffers from the inode's buffer list. This is called
979 * when we're trying to free the inode itself. Those buffers can pin it.
981 * Returns true if all buffers were removed.
983 int remove_inode_buffers(struct inode *inode)
985 int ret = 1;
987 if (inode_has_buffers(inode)) {
988 struct address_space *mapping = &inode->i_data;
989 struct list_head *list = &mapping->private_list;
990 struct address_space *buffer_mapping = mapping->assoc_mapping;
992 spin_lock(&buffer_mapping->private_lock);
993 while (!list_empty(list)) {
994 struct buffer_head *bh = BH_ENTRY(list->next);
995 if (buffer_dirty(bh)) {
996 ret = 0;
997 break;
999 __remove_assoc_queue(bh);
1001 spin_unlock(&buffer_mapping->private_lock);
1003 return ret;
1007 * Create the appropriate buffers when given a page for data area and
1008 * the size of each buffer.. Use the bh->b_this_page linked list to
1009 * follow the buffers created. Return NULL if unable to create more
1010 * buffers.
1012 * The retry flag is used to differentiate async IO (paging, swapping)
1013 * which may not fail from ordinary buffer allocations.
1015 static struct buffer_head *
1016 create_buffers(struct page * page, unsigned long size, int retry)
1018 struct buffer_head *bh, *head;
1019 long offset;
1021 try_again:
1022 head = NULL;
1023 offset = PAGE_SIZE;
1024 while ((offset -= size) >= 0) {
1025 bh = alloc_buffer_head(GFP_NOFS);
1026 if (!bh)
1027 goto no_grow;
1029 bh->b_bdev = NULL;
1030 bh->b_this_page = head;
1031 bh->b_blocknr = -1;
1032 head = bh;
1034 bh->b_state = 0;
1035 atomic_set(&bh->b_count, 0);
1036 bh->b_size = size;
1038 /* Link the buffer to its page */
1039 set_bh_page(bh, page, offset);
1041 bh->b_end_io = NULL;
1043 return head;
1045 * In case anything failed, we just free everything we got.
1047 no_grow:
1048 if (head) {
1049 do {
1050 bh = head;
1051 head = head->b_this_page;
1052 free_buffer_head(bh);
1053 } while (head);
1057 * Return failure for non-async IO requests. Async IO requests
1058 * are not allowed to fail, so we have to wait until buffer heads
1059 * become available. But we don't want tasks sleeping with
1060 * partially complete buffers, so all were released above.
1062 if (!retry)
1063 return NULL;
1065 /* We're _really_ low on memory. Now we just
1066 * wait for old buffer heads to become free due to
1067 * finishing IO. Since this is an async request and
1068 * the reserve list is empty, we're sure there are
1069 * async buffer heads in use.
1071 free_more_memory();
1072 goto try_again;
1075 static inline void
1076 link_dev_buffers(struct page *page, struct buffer_head *head)
1078 struct buffer_head *bh, *tail;
1080 bh = head;
1081 do {
1082 tail = bh;
1083 bh = bh->b_this_page;
1084 } while (bh);
1085 tail->b_this_page = head;
1086 __set_page_buffers(page, head);
1090 * Initialise the state of a blockdev page's buffers.
1092 static void
1093 init_page_buffers(struct page *page, struct block_device *bdev,
1094 int block, int size)
1096 struct buffer_head *head = page_buffers(page);
1097 struct buffer_head *bh = head;
1098 unsigned int b_state;
1100 b_state = 1 << BH_Mapped;
1101 if (PageUptodate(page))
1102 b_state |= 1 << BH_Uptodate;
1104 do {
1105 if (!(bh->b_state & (1 << BH_Mapped))) {
1106 init_buffer(bh, NULL, NULL);
1107 bh->b_bdev = bdev;
1108 bh->b_blocknr = block;
1109 bh->b_state = b_state;
1111 block++;
1112 bh = bh->b_this_page;
1113 } while (bh != head);
1117 * Create the page-cache page that contains the requested block.
1119 * This is used purely for blockdev mappings.
1121 static struct page *
1122 grow_dev_page(struct block_device *bdev, unsigned long block,
1123 unsigned long index, int size)
1125 struct inode *inode = bdev->bd_inode;
1126 struct page *page;
1127 struct buffer_head *bh;
1129 page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
1130 if (!page)
1131 return NULL;
1133 if (!PageLocked(page))
1134 BUG();
1136 if (page_has_buffers(page)) {
1137 bh = page_buffers(page);
1138 if (bh->b_size == size)
1139 return page;
1140 if (!try_to_free_buffers(page))
1141 goto failed;
1145 * Allocate some buffers for this page
1147 bh = create_buffers(page, size, 0);
1148 if (!bh)
1149 goto failed;
1152 * Link the page to the buffers and initialise them. Take the
1153 * lock to be atomic wrt __find_get_block(), which does not
1154 * run under the page lock.
1156 spin_lock(&inode->i_mapping->private_lock);
1157 link_dev_buffers(page, bh);
1158 init_page_buffers(page, bdev, block, size);
1159 spin_unlock(&inode->i_mapping->private_lock);
1160 return page;
1162 failed:
1163 buffer_error();
1164 unlock_page(page);
1165 page_cache_release(page);
1166 return NULL;
1170 * Create buffers for the specified block device block's page. If
1171 * that page was dirty, the buffers are set dirty also.
1173 * Except that's a bug. Attaching dirty buffers to a dirty
1174 * blockdev's page can result in filesystem corruption, because
1175 * some of those buffers may be aliases of filesystem data.
1176 * grow_dev_page() will go BUG() if this happens.
1178 static inline int
1179 grow_buffers(struct block_device *bdev, unsigned long block, int size)
1181 struct page *page;
1182 unsigned long index;
1183 int sizebits;
1185 /* Size must be a multiple of the hard sector size */
1186 if (size & (bdev_hardsect_size(bdev)-1))
1187 BUG();
1188 if (size < 512 || size > PAGE_SIZE)
1189 BUG();
1191 sizebits = -1;
1192 do {
1193 sizebits++;
1194 } while ((size << sizebits) < PAGE_SIZE);
1196 index = block >> sizebits;
1197 block = index << sizebits;
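/*
 * Worked example: with PAGE_SIZE == 4096 and size == 1024, sizebits ends up
 * as 2, so blocks 0-3 share page index 0, blocks 4-7 share index 1, and so
 * on; 'block' is rounded down to the first block of its page.
 */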
1199 /* Create a page with the proper size buffers.. */
1200 page = grow_dev_page(bdev, block, index, size);
1201 if (!page)
1202 return 0;
1203 unlock_page(page);
1204 page_cache_release(page);
1205 return 1;
1208 struct buffer_head *
1209 __getblk_slow(struct block_device *bdev, sector_t block, int size)
1211 for (;;) {
1212 struct buffer_head * bh;
1214 bh = __find_get_block(bdev, block, size);
1215 if (bh)
1216 return bh;
1218 if (!grow_buffers(bdev, block, size))
1219 free_more_memory();
1224 * The relationship between dirty buffers and dirty pages:
1226 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1227 * the page appears on its address_space.dirty_pages list.
1229 * At all times, the dirtiness of the buffers represents the dirtiness of
1230 * subsections of the page. If the page has buffers, the page dirty bit is
1231 * merely a hint about the true dirty state.
1233 * When a page is set dirty in its entirety, all its buffers are marked dirty
1234 * (if the page has buffers).
1236 * When a buffer is marked dirty, its page is dirtied, but the page's other
1237 * buffers are not.
1239 * Also. When blockdev buffers are explicitly read with bread(), they
1240 * individually become uptodate. But their backing page remains not
1241 * uptodate - even if all of its buffers are uptodate. A subsequent
1242 * block_read_full_page() against that page will discover all the uptodate
1243 * buffers, will set the page uptodate and will perform no I/O.
1247 * mark_buffer_dirty - mark a buffer_head as needing writeout
1249 * mark_buffer_dirty() will set the dirty bit against the buffer,
1250 * then set its backing page dirty, then attach the page to its
1251 * address_space's dirty_pages list and then attach the address_space's
1252 * inode to its superblock's dirty inode list.
1254 * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
1255 * mapping->page_lock and the global inode_lock.
1257 void mark_buffer_dirty(struct buffer_head *bh)
1259 if (!buffer_uptodate(bh))
1260 buffer_error();
1261 if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
1262 __set_page_dirty_nobuffers(bh->b_page);
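/*
 * A small sketch (hypothetical caller) of the cascade documented above: the
 * buffer is read, modified, marked dirty (which dirties its page and the
 * blockdev inode), and then released; actual writeout happens later.
 */
static void example_update_byte(struct block_device *bdev, sector_t block,
				int size, unsigned offset, char val)
{
	struct buffer_head *bh = __bread(bdev, block, size);

	if (bh) {
		bh->b_data[offset] = val;	/* modify the cached block */
		mark_buffer_dirty(bh);		/* schedule it for writeout */
		brelse(bh);			/* drop our reference */
	}
}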
1266 * Decrement a buffer_head's reference count. If all buffers against a page
1267 * have zero reference count, are clean and unlocked, and if the page is clean
1268 * and unlocked then try_to_free_buffers() may strip the buffers from the page
1269 * in preparation for freeing it (sometimes, rarely, buffers are removed from
1270 * a page but it ends up not being freed, and buffers may later be reattached).
1272 void __brelse(struct buffer_head * buf)
1274 if (atomic_read(&buf->b_count)) {
1275 put_bh(buf);
1276 return;
1278 printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1279 buffer_error(); /* For the stack backtrace */
1283 * bforget() is like brelse(), except it discards any
1284 * potentially dirty data.
1286 void __bforget(struct buffer_head *bh)
1288 clear_buffer_dirty(bh);
1289 if (!list_empty(&bh->b_assoc_buffers)) {
1290 struct address_space *buffer_mapping = bh->b_page->mapping;
1292 spin_lock(&buffer_mapping->private_lock);
1293 list_del_init(&bh->b_assoc_buffers);
1294 spin_unlock(&buffer_mapping->private_lock);
1296 __brelse(bh);
1299 static struct buffer_head *__bread_slow(struct buffer_head *bh)
1301 lock_buffer(bh);
1302 if (buffer_uptodate(bh)) {
1303 unlock_buffer(bh);
1304 return bh;
1305 } else {
1306 if (buffer_dirty(bh))
1307 buffer_error();
1308 get_bh(bh);
1309 bh->b_end_io = end_buffer_read_sync;
1310 submit_bh(READ, bh);
1311 wait_on_buffer(bh);
1312 if (buffer_uptodate(bh))
1313 return bh;
1315 brelse(bh);
1316 return NULL;
1320 * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block().
1321 * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their
1322 * refcount elevated by one when they're in an LRU. A buffer can only appear
1323 * once in a particular CPU's LRU. A single buffer can be present in multiple
1324 * CPU's LRUs at the same time.
1326 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1327 * sb_find_get_block().
1329 * The LRUs themselves only need locking against invalidate_bh_lrus. We use
1330 * a local interrupt disable for that.
1333 #define BH_LRU_SIZE 8
1335 struct bh_lru {
1336 struct buffer_head *bhs[BH_LRU_SIZE];
1339 static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{0}};
1341 #ifdef CONFIG_SMP
1342 #define bh_lru_lock() local_irq_disable()
1343 #define bh_lru_unlock() local_irq_enable()
1344 #else
1345 #define bh_lru_lock() preempt_disable()
1346 #define bh_lru_unlock() preempt_enable()
1347 #endif
1349 static inline void check_irqs_on(void)
1351 #ifdef irqs_disabled
1352 BUG_ON(irqs_disabled());
1353 #endif
1357 * The LRU management algorithm is dopey-but-simple. Sorry.
1359 static void bh_lru_install(struct buffer_head *bh)
1361 struct buffer_head *evictee = NULL;
1362 struct bh_lru *lru;
1364 check_irqs_on();
1365 bh_lru_lock();
1366 lru = &__get_cpu_var(bh_lrus);
1367 if (lru->bhs[0] != bh) {
1368 struct buffer_head *bhs[BH_LRU_SIZE];
1369 int in;
1370 int out = 0;
1372 get_bh(bh);
1373 bhs[out++] = bh;
1374 for (in = 0; in < BH_LRU_SIZE; in++) {
1375 struct buffer_head *bh2 = lru->bhs[in];
1377 if (bh2 == bh) {
1378 __brelse(bh2);
1379 } else {
1380 if (out >= BH_LRU_SIZE) {
1381 BUG_ON(evictee != NULL);
1382 evictee = bh2;
1383 } else {
1384 bhs[out++] = bh2;
1388 while (out < BH_LRU_SIZE)
1389 bhs[out++] = NULL;
1390 memcpy(lru->bhs, bhs, sizeof(bhs));
1392 bh_lru_unlock();
1394 if (evictee)
1395 __brelse(evictee);
1399 * Look up the bh in this cpu's LRU. If it's there, move it to the head.
1401 static inline struct buffer_head *
1402 lookup_bh_lru(struct block_device *bdev, sector_t block, int size)
1404 struct buffer_head *ret = NULL;
1405 struct bh_lru *lru;
1406 int i;
1408 check_irqs_on();
1409 bh_lru_lock();
1410 lru = &__get_cpu_var(bh_lrus);
1411 for (i = 0; i < BH_LRU_SIZE; i++) {
1412 struct buffer_head *bh = lru->bhs[i];
1414 if (bh && bh->b_bdev == bdev &&
1415 bh->b_blocknr == block && bh->b_size == size) {
1416 if (i) {
1417 while (i) {
1418 lru->bhs[i] = lru->bhs[i - 1];
1419 i--;
1421 lru->bhs[0] = bh;
1423 get_bh(bh);
1424 ret = bh;
1425 break;
1428 bh_lru_unlock();
1429 return ret;
1433 * Perform a pagecache lookup for the matching buffer. If it's there, refresh
1434 * it in the LRU and mark it as accessed. If it is not present then return
1435 * NULL
1437 struct buffer_head *
1438 __find_get_block(struct block_device *bdev, sector_t block, int size)
1440 struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1442 if (bh == NULL) {
1443 bh = __find_get_block_slow(bdev, block, size);
1444 if (bh)
1445 bh_lru_install(bh);
1447 if (bh)
1448 touch_buffer(bh);
1449 return bh;
1451 EXPORT_SYMBOL(__find_get_block);
1454 * __getblk will locate (and, if necessary, create) the buffer_head
1455 * which corresponds to the passed block_device, block and size. The
1456 * returned buffer has its reference count incremented.
1458 * __getblk() cannot fail - it just keeps trying. If you pass it an
1459 * illegal block number, __getblk() will happily return a buffer_head
1460 * which represents the non-existent block. Very weird.
1462 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1463 * attempt is failing. FIXME, perhaps?
1465 struct buffer_head *
1466 __getblk(struct block_device *bdev, sector_t block, int size)
1468 struct buffer_head *bh = __find_get_block(bdev, block, size);
1470 if (bh == NULL)
1471 bh = __getblk_slow(bdev, block, size);
1472 return bh;
1474 EXPORT_SYMBOL(__getblk);
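/*
 * A sketch of a full-block overwrite built on __getblk(): since every byte of
 * the block is rewritten there is no need to read it first (contrast with
 * __bread() below).  The function name is illustrative only.
 */
static void example_overwrite_block(struct block_device *bdev, sector_t block,
				    int size, const char *src)
{
	struct buffer_head *bh = __getblk(bdev, block, size);

	lock_buffer(bh);
	memcpy(bh->b_data, src, size);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);
	mark_buffer_dirty(bh);
	brelse(bh);
}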
1477 * Do async read-ahead on a buffer..
1479 void __breadahead(struct block_device *bdev, sector_t block, int size)
1481 struct buffer_head *bh = __getblk(bdev, block, size);
1482 ll_rw_block(READA, 1, &bh);
1483 brelse(bh);
1485 EXPORT_SYMBOL(__breadahead);
1488 * __bread() - reads a specified block and returns the bh
1489 * @block: number of block
1490 * @size: size (in bytes) to read
1492 * Reads a specified block, and returns buffer head that contains it.
1493 * It returns NULL if the block was unreadable.
1495 struct buffer_head *
1496 __bread(struct block_device *bdev, sector_t block, int size)
1498 struct buffer_head *bh = __getblk(bdev, block, size);
1500 if (!buffer_uptodate(bh))
1501 bh = __bread_slow(bh);
1502 return bh;
1504 EXPORT_SYMBOL(__bread);
1507 * invalidate_bh_lrus() is called rarely - at unmount. Because it is only for
1508 * unmount it only needs to ensure that all buffers from the target device are
1509 * invalidated on return and it doesn't need to worry about new buffers from
1510 * that device being added - the unmount code has to prevent that.
1512 static void invalidate_bh_lru(void *arg)
1514 struct bh_lru *b = &get_cpu_var(bh_lrus);
1515 int i;
1517 for (i = 0; i < BH_LRU_SIZE; i++) {
1518 brelse(b->bhs[i]);
1519 b->bhs[i] = NULL;
1521 put_cpu_var(bh_lrus);
1524 static void invalidate_bh_lrus(void)
1526 on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
1529 void set_bh_page(struct buffer_head *bh,
1530 struct page *page, unsigned long offset)
1532 bh->b_page = page;
1533 if (offset >= PAGE_SIZE)
1534 BUG();
1535 if (PageHighMem(page))
1537 * This catches illegal uses and preserves the offset:
1539 bh->b_data = (char *)(0 + offset);
1540 else
1541 bh->b_data = page_address(page) + offset;
1543 EXPORT_SYMBOL(set_bh_page);
1546 * Called when truncating a buffer on a page completely.
1548 static inline void discard_buffer(struct buffer_head * bh)
1550 lock_buffer(bh);
1551 clear_buffer_dirty(bh);
1552 bh->b_bdev = NULL;
1553 clear_buffer_mapped(bh);
1554 clear_buffer_req(bh);
1555 clear_buffer_new(bh);
1556 clear_buffer_delay(bh);
1557 unlock_buffer(bh);
1561 * try_to_release_page() - release old fs-specific metadata on a page
1563 * @page: the page which the kernel is trying to free
1564 * @gfp_mask: memory allocation flags (and I/O mode)
1566 * The address_space is to try to release any data against the page
1567 * (presumably at page->private). If the release was successful, return `1'.
1568 * Otherwise return zero.
1570 * The @gfp_mask argument specifies whether I/O may be performed to release
1571 * this page (__GFP_IO), and whether the call may block (__GFP_WAIT).
1573 * NOTE: @gfp_mask may go away, and this function may become non-blocking.
1575 int try_to_release_page(struct page *page, int gfp_mask)
1577 struct address_space * const mapping = page->mapping;
1579 if (!PageLocked(page))
1580 BUG();
1581 if (PageWriteback(page))
1582 return 0;
1584 if (mapping && mapping->a_ops->releasepage)
1585 return mapping->a_ops->releasepage(page, gfp_mask);
1586 return try_to_free_buffers(page);
1590 * block_invalidatepage - invalidate part or all of a buffer-backed page
1592 * @page: the page which is affected
1593 * @offset: the index of the truncation point
1595 * block_invalidatepage() is called when all or part of the page has become
1596 * invalidated by a truncate operation.
1598 * block_invalidatepage() does not have to release all buffers, but it must
1599 * ensure that no dirty buffer is left outside @offset and that no I/O
1600 * is underway against any of the blocks which are outside the truncation
1601 * point. Because the caller is about to free (and possibly reuse) those
1602 * blocks on-disk.
1604 int block_invalidatepage(struct page *page, unsigned long offset)
1606 struct buffer_head *head, *bh, *next;
1607 unsigned int curr_off = 0;
1608 int ret = 1;
1610 BUG_ON(!PageLocked(page));
1611 if (!page_has_buffers(page))
1612 goto out;
1614 head = page_buffers(page);
1615 bh = head;
1616 do {
1617 unsigned int next_off = curr_off + bh->b_size;
1618 next = bh->b_this_page;
1621 * is this block fully invalidated?
1623 if (offset <= curr_off)
1624 discard_buffer(bh);
1625 curr_off = next_off;
1626 bh = next;
1627 } while (bh != head);
1630 * We release buffers only if the entire page is being invalidated.
1631 * The get_block cached value has been unconditionally invalidated,
1632 * so real IO is not possible anymore.
1634 if (offset == 0)
1635 ret = try_to_release_page(page, 0);
1636 out:
1637 return ret;
1639 EXPORT_SYMBOL(block_invalidatepage);
1642 * We attach and possibly dirty the buffers atomically wrt
1643 * __set_page_dirty_buffers() via private_lock. try_to_free_buffers
1644 * is already excluded via the page lock.
1646 void create_empty_buffers(struct page *page,
1647 unsigned long blocksize, unsigned long b_state)
1649 struct buffer_head *bh, *head, *tail;
1651 head = create_buffers(page, blocksize, 1);
1652 bh = head;
1653 do {
1654 bh->b_state |= b_state;
1655 tail = bh;
1656 bh = bh->b_this_page;
1657 } while (bh);
1658 tail->b_this_page = head;
1660 spin_lock(&page->mapping->private_lock);
1661 if (PageUptodate(page) || PageDirty(page)) {
1662 bh = head;
1663 do {
1664 if (PageDirty(page))
1665 set_buffer_dirty(bh);
1666 if (PageUptodate(page))
1667 set_buffer_uptodate(bh);
1668 bh = bh->b_this_page;
1669 } while (bh != head);
1671 __set_page_buffers(page, head);
1672 spin_unlock(&page->mapping->private_lock);
1674 EXPORT_SYMBOL(create_empty_buffers);
1677 * We are taking a block for data and we don't want any output from any
1678 * buffer-cache aliases starting from the return from that function and
1679 * until the moment when something explicitly marks the buffer
1680 * dirty (hopefully that will not happen until we free that block ;-)
1681 * We don't even need to mark it not-uptodate - nobody can expect
1682 * anything from a newly allocated buffer anyway. We used to use
1683 * unmap_buffer() for such invalidation, but that was wrong. We definitely
1684 * don't want to mark the alias unmapped, for example - it would confuse
1685 * anyone who might pick it with bread() afterwards...
1687 * Also.. Note that bforget() doesn't lock the buffer. So there can
1688 * be writeout I/O going on against recently-freed buffers. We don't
1689 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1690 * only if we really need to. That happens here.
1692 void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1694 struct buffer_head *old_bh;
1696 old_bh = __find_get_block_slow(bdev, block, 0);
1697 if (old_bh) {
1698 #if 0 /* This happens. Later. */
1699 if (buffer_dirty(old_bh))
1700 buffer_error();
1701 #endif
1702 clear_buffer_dirty(old_bh);
1703 wait_on_buffer(old_bh);
1704 clear_buffer_req(old_bh);
1705 __brelse(old_bh);
1708 EXPORT_SYMBOL(unmap_underlying_metadata);
1711 * NOTE! All mapped/uptodate combinations are valid:
1713 * Mapped Uptodate Meaning
1715 * No No "unknown" - must do get_block()
1716 * No Yes "hole" - zero-filled
1717 * Yes No "allocated" - allocated on disk, not read in
1718 * Yes Yes "valid" - allocated and up-to-date in memory.
1720 * "Dirty" is valid only with the last case (mapped+uptodate).
1724 * While block_write_full_page is writing back the dirty buffers under
1725 * the page lock, whoever dirtied the buffers may decide to clean them
1726 * again at any time. We handle that by only looking at the buffer
1727 * state inside lock_buffer().
1729 * If block_write_full_page() is called for regular writeback
1730 * (called_for_sync() is false) then it will redirty a page which has a locked
1731 * buffer. This only can happen if someone has written the buffer directly,
1732 * with submit_bh(). At the address_space level PageWriteback prevents this
1733 * contention from occurring.
1735 static int __block_write_full_page(struct inode *inode, struct page *page,
1736 get_block_t *get_block, struct writeback_control *wbc)
1738 int err;
1739 unsigned long block;
1740 unsigned long last_block;
1741 struct buffer_head *bh, *head;
1742 int nr_underway = 0;
1744 BUG_ON(!PageLocked(page));
1746 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1748 if (!page_has_buffers(page)) {
1749 if (!PageUptodate(page))
1750 buffer_error();
1751 create_empty_buffers(page, 1 << inode->i_blkbits,
1752 (1 << BH_Dirty)|(1 << BH_Uptodate));
1756 * Be very careful. We have no exclusion from __set_page_dirty_buffers
1757 * here, and the (potentially unmapped) buffers may become dirty at
1758 * any time. If a buffer becomes dirty here after we've inspected it
1759 * then we just miss that fact, and the page stays dirty.
1761 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1762 * handle that here by just cleaning them.
1765 block = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1766 head = page_buffers(page);
1767 bh = head;
1770 * Get all the dirty buffers mapped to disk addresses and
1771 * handle any aliases from the underlying blockdev's mapping.
1773 do {
1774 if (block > last_block) {
1776 * mapped buffers outside i_size will occur, because
1777 * this page can be outside i_size when there is a
1778 * truncate in progress.
1780 * if (buffer_mapped(bh))
1781 * buffer_error();
1784 * The buffer was zeroed by block_write_full_page()
1786 clear_buffer_dirty(bh);
1787 set_buffer_uptodate(bh);
1788 } else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
1789 if (buffer_new(bh))
1790 buffer_error();
1791 err = get_block(inode, block, bh, 1);
1792 if (err)
1793 goto recover;
1794 if (buffer_new(bh)) {
1795 /* blockdev mappings never come here */
1796 clear_buffer_new(bh);
1797 unmap_underlying_metadata(bh->b_bdev,
1798 bh->b_blocknr);
1801 bh = bh->b_this_page;
1802 block++;
1803 } while (bh != head);
1805 do {
1806 get_bh(bh);
1807 if (buffer_mapped(bh) && buffer_dirty(bh)) {
1808 if (wbc->sync_mode != WB_SYNC_NONE) {
1809 lock_buffer(bh);
1810 } else {
1811 if (test_set_buffer_locked(bh)) {
1812 __set_page_dirty_nobuffers(page);
1813 continue;
1816 if (test_clear_buffer_dirty(bh)) {
1817 if (!buffer_uptodate(bh))
1818 buffer_error();
1819 mark_buffer_async_write(bh);
1820 } else {
1821 unlock_buffer(bh);
1824 } while ((bh = bh->b_this_page) != head);
1826 BUG_ON(PageWriteback(page));
1827 SetPageWriteback(page); /* Keeps try_to_free_buffers() away */
1828 unlock_page(page);
1831 * The page may come unlocked any time after the *first* submit_bh()
1832 * call. Be careful with its buffers.
1834 do {
1835 struct buffer_head *next = bh->b_this_page;
1836 if (buffer_async_write(bh)) {
1837 submit_bh(WRITE, bh);
1838 nr_underway++;
1840 put_bh(bh);
1841 bh = next;
1842 } while (bh != head);
1844 err = 0;
1845 done:
1846 if (nr_underway == 0) {
1848 * The page was marked dirty, but the buffers were
1849 * clean. Someone wrote them back by hand with
1850 * ll_rw_block/submit_bh. A rare case.
1852 int uptodate = 1;
1853 do {
1854 if (!buffer_uptodate(bh)) {
1855 uptodate = 0;
1856 break;
1858 bh = bh->b_this_page;
1859 } while (bh != head);
1860 if (uptodate)
1861 SetPageUptodate(page);
1862 end_page_writeback(page);
1864 return err;
1866 recover:
1868 * ENOSPC, or some other error. We may already have added some
1869 * blocks to the file, so we need to write these out to avoid
1870 * exposing stale data.
1871 * The page is currently locked and not marked for writeback
1873 bh = head;
1874 /* Recovery: lock and submit the mapped buffers */
1875 do {
1876 get_bh(bh);
1877 if (buffer_mapped(bh) && buffer_dirty(bh)) {
1878 lock_buffer(bh);
1879 mark_buffer_async_write(bh);
1880 } else {
1882 * The buffer may have been set dirty during
1883 * attachment to a dirty page.
1885 clear_buffer_dirty(bh);
1887 } while ((bh = bh->b_this_page) != head);
1888 SetPageError(page);
1889 BUG_ON(PageWriteback(page));
1890 SetPageWriteback(page);
1891 unlock_page(page);
1892 do {
1893 struct buffer_head *next = bh->b_this_page;
1894 if (buffer_async_write(bh)) {
1895 clear_buffer_dirty(bh);
1896 submit_bh(WRITE, bh);
1897 nr_underway++;
1899 put_bh(bh);
1900 bh = next;
1901 } while (bh != head);
1902 goto done;
1905 static int __block_prepare_write(struct inode *inode, struct page *page,
1906 unsigned from, unsigned to, get_block_t *get_block)
1908 unsigned block_start, block_end;
1909 sector_t block;
1910 int err = 0;
1911 unsigned blocksize, bbits;
1912 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1914 BUG_ON(!PageLocked(page));
1915 BUG_ON(from > PAGE_CACHE_SIZE);
1916 BUG_ON(to > PAGE_CACHE_SIZE);
1917 BUG_ON(from > to);
1919 blocksize = 1 << inode->i_blkbits;
1920 if (!page_has_buffers(page))
1921 create_empty_buffers(page, blocksize, 0);
1922 head = page_buffers(page);
1924 bbits = inode->i_blkbits;
1925 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1927 for(bh = head, block_start = 0; bh != head || !block_start;
1928 block++, block_start=block_end, bh = bh->b_this_page) {
1929 block_end = block_start + blocksize;
1930 if (block_end <= from || block_start >= to) {
1931 if (PageUptodate(page)) {
1932 if (!buffer_uptodate(bh))
1933 set_buffer_uptodate(bh);
1935 continue;
1937 if (buffer_new(bh))
1938 clear_buffer_new(bh);
1939 if (!buffer_mapped(bh)) {
1940 err = get_block(inode, block, bh, 1);
1941 if (err)
1942 goto out;
1943 if (buffer_new(bh)) {
1944 clear_buffer_new(bh);
1945 unmap_underlying_metadata(bh->b_bdev,
1946 bh->b_blocknr);
1947 if (PageUptodate(page)) {
1948 if (!buffer_mapped(bh))
1949 buffer_error();
1950 set_buffer_uptodate(bh);
1951 continue;
1953 if (block_end > to || block_start < from) {
1954 void *kaddr;
1956 kaddr = kmap_atomic(page, KM_USER0);
1957 if (block_end > to)
1958 memset(kaddr+to, 0,
1959 block_end-to);
1960 if (block_start < from)
1961 memset(kaddr+block_start,
1962 0, from-block_start);
1963 flush_dcache_page(page);
1964 kunmap_atomic(kaddr, KM_USER0);
1966 continue;
1969 if (PageUptodate(page)) {
1970 if (!buffer_uptodate(bh))
1971 set_buffer_uptodate(bh);
1972 continue;
1974 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1975 (block_start < from || block_end > to)) {
1976 ll_rw_block(READ, 1, &bh);
1977 *wait_bh++=bh;
1981 * If we issued read requests - let them complete.
1983 while(wait_bh > wait) {
1984 wait_on_buffer(*--wait_bh);
1985 if (!buffer_uptodate(*wait_bh))
1986 return -EIO;
1988 return 0;
1989 out:
1991 * Zero out any newly allocated blocks to avoid exposing stale
1992 * data. If BH_New is set, we know that the block was newly
1993 * allocated in the above loop.
1995 bh = head;
1996 block_start = 0;
1997 do {
1998 block_end = block_start+blocksize;
1999 if (block_end <= from)
2000 goto next_bh;
2001 if (block_start >= to)
2002 break;
2003 if (buffer_new(bh)) {
2004 void *kaddr;
2006 clear_buffer_new(bh);
2007 if (buffer_uptodate(bh))
2008 buffer_error();
2009 kaddr = kmap_atomic(page, KM_USER0);
2010 memset(kaddr+block_start, 0, bh->b_size);
2011 kunmap_atomic(kaddr, KM_USER0);
2012 set_buffer_uptodate(bh);
2013 mark_buffer_dirty(bh);
2015 next_bh:
2016 block_start = block_end;
2017 bh = bh->b_this_page;
2018 } while (bh != head);
2019 return err;
2022 static int __block_commit_write(struct inode *inode, struct page *page,
2023 unsigned from, unsigned to)
2025 unsigned block_start, block_end;
2026 int partial = 0;
2027 unsigned blocksize;
2028 struct buffer_head *bh, *head;
2030 blocksize = 1 << inode->i_blkbits;
2032 for(bh = head = page_buffers(page), block_start = 0;
2033 bh != head || !block_start;
2034 block_start=block_end, bh = bh->b_this_page) {
2035 block_end = block_start + blocksize;
2036 if (block_end <= from || block_start >= to) {
2037 if (!buffer_uptodate(bh))
2038 partial = 1;
2039 } else {
2040 set_buffer_uptodate(bh);
2041 mark_buffer_dirty(bh);
2042 }
2043 }
2045 /*
2046 * If this is a partial write which happened to make all buffers
2047 * uptodate then we can optimize away a bogus readpage() for
2048 * the next read(). Here we 'discover' whether the page went
2049 * uptodate as a result of this (potentially partial) write.
2050 */
2051 if (!partial)
2052 SetPageUptodate(page);
2053 return 0;
2054 }
2056 /*
2057 * Generic "read page" function for block devices that have the normal
2058 * get_block functionality. This is most of the block device filesystems.
2059 * Reads the page asynchronously --- the unlock_buffer() and
2060 * set/clear_buffer_uptodate() functions propagate buffer state into the
2061 * page struct once IO has completed.
2062 */
2063 int block_read_full_page(struct page *page, get_block_t *get_block)
2065 struct inode *inode = page->mapping->host;
2066 sector_t iblock, lblock;
2067 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2068 unsigned int blocksize;
2069 int nr, i;
2070 int fully_mapped = 1;
2072 if (!PageLocked(page))
2073 PAGE_BUG(page);
2074 if (PageUptodate(page))
2075 buffer_error();
2076 blocksize = 1 << inode->i_blkbits;
2077 if (!page_has_buffers(page))
2078 create_empty_buffers(page, blocksize, 0);
2079 head = page_buffers(page);
2081 iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2082 lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2083 bh = head;
2084 nr = 0;
2085 i = 0;
2087 do {
2088 if (buffer_uptodate(bh))
2089 continue;
2091 if (!buffer_mapped(bh)) {
2092 fully_mapped = 0;
2093 if (iblock < lblock) {
2094 if (get_block(inode, iblock, bh, 0))
2095 SetPageError(page);
2097 if (!buffer_mapped(bh)) {
2098 void *kaddr = kmap_atomic(page, KM_USER0);
2099 memset(kaddr + i * blocksize, 0, blocksize);
2100 flush_dcache_page(page);
2101 kunmap_atomic(kaddr, KM_USER0);
2102 set_buffer_uptodate(bh);
2103 continue;
2104 }
2105 /*
2106 * get_block() might have updated the buffer
2107 * synchronously
2108 */
2109 if (buffer_uptodate(bh))
2110 continue;
2112 arr[nr++] = bh;
2113 } while (i++, iblock++, (bh = bh->b_this_page) != head);
2115 if (fully_mapped)
2116 SetPageMappedToDisk(page);
2118 if (!nr) {
2119 /*
2120 * All buffers are uptodate - we can set the page uptodate
2121 * as well. But not if get_block() returned an error.
2122 */
2123 if (!PageError(page))
2124 SetPageUptodate(page);
2125 unlock_page(page);
2126 return 0;
2129 /* Stage two: lock the buffers */
2130 for (i = 0; i < nr; i++) {
2131 bh = arr[i];
2132 lock_buffer(bh);
2133 mark_buffer_async_read(bh);
2134 }
2136 /*
2137 * Stage 3: start the IO. Check for uptodateness
2138 * inside the buffer lock in case another process reading
2139 * the underlying blockdev brought it uptodate (the sct fix).
2140 */
2141 for (i = 0; i < nr; i++) {
2142 bh = arr[i];
2143 if (buffer_uptodate(bh))
2144 end_buffer_async_read(bh, 1);
2145 else
2146 submit_bh(READ, bh);
2148 return 0;
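/*
 * Example (hypothetical sketch, not taken from this file): a filesystem
 * normally reaches block_read_full_page() through its readpage
 * address_space operation.  myfs_readpage and myfs_get_block are
 * made-up names; myfs_get_block is the filesystem's block mapper.
 */
extern int myfs_get_block(struct inode *inode, sector_t iblock,
				struct buffer_head *bh_result, int create);

static int myfs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, myfs_get_block);
}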
2151 /* utility function for filesystems that need to do work on expanding
2152 * truncates. Uses prepare/commit_write to allow the filesystem to
2153 * deal with the hole.
2154 */
2155 int generic_cont_expand(struct inode *inode, loff_t size)
2157 struct address_space *mapping = inode->i_mapping;
2158 struct page *page;
2159 unsigned long index, offset, limit;
2160 int err;
2162 err = -EFBIG;
2163 limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
2164 if (limit != RLIM_INFINITY && size > (loff_t)limit) {
2165 send_sig(SIGXFSZ, current, 0);
2166 goto out;
2168 if (size > inode->i_sb->s_maxbytes)
2169 goto out;
2171 offset = (size & (PAGE_CACHE_SIZE-1)); /* Within page */
2173 /* ugh. in prepare/commit_write, if from==to==start of block, we
2174 ** skip the prepare. make sure we never send an offset for the start
2175 ** of a block
2176 */
2177 if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
2178 offset++;
2180 index = size >> PAGE_CACHE_SHIFT;
2181 err = -ENOMEM;
2182 page = grab_cache_page(mapping, index);
2183 if (!page)
2184 goto out;
2185 err = mapping->a_ops->prepare_write(NULL, page, offset, offset);
2186 if (!err) {
2187 err = mapping->a_ops->commit_write(NULL, page, offset, offset);
2189 unlock_page(page);
2190 page_cache_release(page);
2191 if (err > 0)
2192 err = 0;
2193 out:
2194 return err;
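/*
 * Example (hypothetical sketch): a filesystem that cannot represent
 * holes might call generic_cont_expand() when a size-changing operation
 * grows the file, before updating its own metadata.  myfs_grow is a
 * made-up helper.
 */
static int myfs_grow(struct inode *inode, loff_t new_size)
{
	/* Only expanding changes need the prepare/commit dance. */
	if (new_size <= i_size_read(inode))
		return 0;
	return generic_cont_expand(inode, new_size);
}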
2197 /*
2198 * For moronic filesystems that do not allow holes in files.
2199 * We may have to extend the file.
2200 */
2202 int cont_prepare_write(struct page *page, unsigned offset,
2203 unsigned to, get_block_t *get_block, loff_t *bytes)
2205 struct address_space *mapping = page->mapping;
2206 struct inode *inode = mapping->host;
2207 struct page *new_page;
2208 unsigned long pgpos;
2209 long status;
2210 unsigned zerofrom;
2211 unsigned blocksize = 1 << inode->i_blkbits;
2212 void *kaddr;
2214 while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) {
2215 status = -ENOMEM;
2216 new_page = grab_cache_page(mapping, pgpos);
2217 if (!new_page)
2218 goto out;
2219 /* we might sleep */
2220 if (*bytes>>PAGE_CACHE_SHIFT != pgpos) {
2221 unlock_page(new_page);
2222 page_cache_release(new_page);
2223 continue;
2225 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2226 if (zerofrom & (blocksize-1)) {
2227 *bytes |= (blocksize-1);
2228 (*bytes)++;
2230 status = __block_prepare_write(inode, new_page, zerofrom,
2231 PAGE_CACHE_SIZE, get_block);
2232 if (status)
2233 goto out_unmap;
2234 kaddr = kmap_atomic(new_page, KM_USER0);
2235 memset(kaddr+zerofrom, 0, PAGE_CACHE_SIZE-zerofrom);
2236 flush_dcache_page(new_page);
2237 kunmap_atomic(kaddr, KM_USER0);
2238 __block_commit_write(inode, new_page,
2239 zerofrom, PAGE_CACHE_SIZE);
2240 unlock_page(new_page);
2241 page_cache_release(new_page);
2244 if (page->index < pgpos) {
2245 /* completely inside the area */
2246 zerofrom = offset;
2247 } else {
2248 /* page covers the boundary, find the boundary offset */
2249 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2251 /* if we will expand the thing last block will be filled */
2252 if (to > zerofrom && (zerofrom & (blocksize-1))) {
2253 *bytes |= (blocksize-1);
2254 (*bytes)++;
2257 /* starting below the boundary? Nothing to zero out */
2258 if (offset <= zerofrom)
2259 zerofrom = offset;
2261 status = __block_prepare_write(inode, page, zerofrom, to, get_block);
2262 if (status)
2263 goto out1;
2264 if (zerofrom < offset) {
2265 kaddr = kmap_atomic(page, KM_USER0);
2266 memset(kaddr+zerofrom, 0, offset-zerofrom);
2267 flush_dcache_page(page);
2268 kunmap_atomic(kaddr, KM_USER0);
2269 __block_commit_write(inode, page, zerofrom, offset);
2271 return 0;
2272 out1:
2273 ClearPageUptodate(page);
2274 return status;
2276 out_unmap:
2277 ClearPageUptodate(new_page);
2278 unlock_page(new_page);
2279 page_cache_release(new_page);
2280 out:
2281 return status;
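/*
 * Example (hypothetical sketch): a hole-less filesystem wires
 * cont_prepare_write() into its prepare_write method, passing a
 * per-inode watermark recording how far the file has been written out.
 * contfs_prepare_write, contfs_get_block and contfs_i(...)->written_to
 * are made-up names for that pattern.
 */
static int contfs_prepare_write(struct file *file, struct page *page,
				unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;

	return cont_prepare_write(page, from, to, contfs_get_block,
				&contfs_i(inode)->written_to);
}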
2284 int block_prepare_write(struct page *page, unsigned from, unsigned to,
2285 get_block_t *get_block)
2287 struct inode *inode = page->mapping->host;
2288 int err = __block_prepare_write(inode, page, from, to, get_block);
2289 if (err)
2290 ClearPageUptodate(page);
2291 return err;
2294 int block_commit_write(struct page *page, unsigned from, unsigned to)
2296 struct inode *inode = page->mapping->host;
2297 __block_commit_write(inode,page,from,to);
2298 return 0;
2301 int generic_commit_write(struct file *file, struct page *page,
2302 unsigned from, unsigned to)
2304 struct inode *inode = page->mapping->host;
2305 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2306 __block_commit_write(inode,page,from,to);
2307 /*
2308 * No need to use i_size_read() here, the i_size
2309 * cannot change under us because we hold i_sem.
2310 */
2311 if (pos > inode->i_size) {
2312 i_size_write(inode, pos);
2313 mark_inode_dirty(inode);
2315 return 0;
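/*
 * Example (hypothetical sketch): typical address_space_operations
 * wiring for a blocksize-granular filesystem built on the helpers in
 * this file.  myfs_get_block, myfs_readpage (sketched earlier) and
 * myfs_writepage (sketched further below, near block_write_full_page)
 * are made-up names.
 */
static int myfs_prepare_write(struct file *file, struct page *page,
				unsigned from, unsigned to)
{
	return block_prepare_write(page, from, to, myfs_get_block);
}

static struct address_space_operations myfs_aops = {
	.readpage	= myfs_readpage,
	.writepage	= myfs_writepage,
	.sync_page	= block_sync_page,
	.prepare_write	= myfs_prepare_write,
	.commit_write	= generic_commit_write,
};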
2318 /*
2319 * On entry, the page is fully not uptodate.
2320 * On exit the page is fully uptodate in the areas outside (from,to)
2321 */
2322 int nobh_prepare_write(struct page *page, unsigned from, unsigned to,
2323 get_block_t *get_block)
2325 struct inode *inode = page->mapping->host;
2326 const unsigned blkbits = inode->i_blkbits;
2327 const unsigned blocksize = 1 << blkbits;
2328 struct buffer_head map_bh;
2329 struct buffer_head *read_bh[MAX_BUF_PER_PAGE];
2330 unsigned block_in_page;
2331 unsigned block_start;
2332 sector_t block_in_file;
2333 char *kaddr;
2334 int nr_reads = 0;
2335 int i;
2336 int ret = 0;
2337 int is_mapped_to_disk = 1;
2338 int dirtied_it = 0;
2340 if (PageMappedToDisk(page))
2341 return 0;
2343 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2344 map_bh.b_page = page;
2346 /*
2347 * We loop across all blocks in the page, whether or not they are
2348 * part of the affected region. This is so we can discover if the
2349 * page is fully mapped-to-disk.
2350 */
2351 for (block_start = 0, block_in_page = 0;
2352 block_start < PAGE_CACHE_SIZE;
2353 block_in_page++, block_start += blocksize) {
2354 unsigned block_end = block_start + blocksize;
2355 int create;
2357 map_bh.b_state = 0;
2358 create = 1;
2359 if (block_start >= to)
2360 create = 0;
2361 ret = get_block(inode, block_in_file + block_in_page,
2362 &map_bh, create);
2363 if (ret)
2364 goto failed;
2365 if (!buffer_mapped(&map_bh))
2366 is_mapped_to_disk = 0;
2367 if (buffer_new(&map_bh))
2368 unmap_underlying_metadata(map_bh.b_bdev,
2369 map_bh.b_blocknr);
2370 if (PageUptodate(page))
2371 continue;
2372 if (buffer_new(&map_bh) || !buffer_mapped(&map_bh)) {
2373 kaddr = kmap_atomic(page, KM_USER0);
2374 if (block_start < from) {
2375 memset(kaddr+block_start, 0, from-block_start);
2376 dirtied_it = 1;
2378 if (block_end > to) {
2379 memset(kaddr + to, 0, block_end - to);
2380 dirtied_it = 1;
2382 flush_dcache_page(page);
2383 kunmap_atomic(kaddr, KM_USER0);
2384 continue;
2386 if (buffer_uptodate(&map_bh))
2387 continue; /* reiserfs does this */
2388 if (block_start < from || block_end > to) {
2389 struct buffer_head *bh = alloc_buffer_head(GFP_NOFS);
2391 if (!bh) {
2392 ret = -ENOMEM;
2393 goto failed;
2395 bh->b_state = map_bh.b_state;
2396 atomic_set(&bh->b_count, 0);
2397 bh->b_this_page = 0;
2398 bh->b_page = page;
2399 bh->b_blocknr = map_bh.b_blocknr;
2400 bh->b_size = blocksize;
2401 bh->b_data = (char *)(long)block_start;
2402 bh->b_bdev = map_bh.b_bdev;
2403 bh->b_private = NULL;
2404 read_bh[nr_reads++] = bh;
2408 if (nr_reads) {
2409 ll_rw_block(READ, nr_reads, read_bh);
2410 for (i = 0; i < nr_reads; i++) {
2411 wait_on_buffer(read_bh[i]);
2412 if (!buffer_uptodate(read_bh[i]))
2413 ret = -EIO;
2414 free_buffer_head(read_bh[i]);
2415 read_bh[i] = NULL;
2417 if (ret)
2418 goto failed;
2421 if (is_mapped_to_disk)
2422 SetPageMappedToDisk(page);
2423 SetPageUptodate(page);
2425 /*
2426 * Setting the page dirty here isn't necessary for the prepare_write
2427 * function - commit_write will do that. But if/when this function is
2428 * used within the pagefault handler to ensure that all mmapped pages
2429 * have backing space in the filesystem, we will need to dirty the page
2430 * if its contents were altered.
2431 */
2432 if (dirtied_it)
2433 set_page_dirty(page);
2435 return 0;
2437 failed:
2438 for (i = 0; i < nr_reads; i++) {
2439 if (read_bh[i])
2440 free_buffer_head(read_bh[i]);
2441 }
2443 /*
2444 * Error recovery is pretty slack. Clear the page and mark it dirty
2445 * so we'll later zero out any blocks which _were_ allocated.
2446 */
2447 kaddr = kmap_atomic(page, KM_USER0);
2448 memset(kaddr, 0, PAGE_CACHE_SIZE);
2449 kunmap_atomic(kaddr, KM_USER0);
2450 SetPageUptodate(page);
2451 set_page_dirty(page);
2452 return ret;
2454 EXPORT_SYMBOL(nobh_prepare_write);
2456 int nobh_commit_write(struct file *file, struct page *page,
2457 unsigned from, unsigned to)
2459 struct inode *inode = page->mapping->host;
2460 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2462 set_page_dirty(page);
2463 if (pos > inode->i_size) {
2464 i_size_write(inode, pos);
2465 mark_inode_dirty(inode);
2467 return 0;
2469 EXPORT_SYMBOL(nobh_commit_write);
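/*
 * Example (hypothetical sketch): a filesystem that opts out of
 * per-page buffer_heads (as ext2's "nobh" mode does, if memory serves)
 * points prepare_write/commit_write at the nobh variants.
 * myfs_nobh_prepare_write and myfs_get_block are made-up names;
 * myfs_readpage is the sketch shown earlier.
 */
static int myfs_nobh_prepare_write(struct file *file, struct page *page,
				unsigned from, unsigned to)
{
	return nobh_prepare_write(page, from, to, myfs_get_block);
}

static struct address_space_operations myfs_nobh_aops = {
	.readpage	= myfs_readpage,
	.prepare_write	= myfs_nobh_prepare_write,
	.commit_write	= nobh_commit_write,
};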
2471 /*
2472 * This function assumes that ->prepare_write() uses nobh_prepare_write().
2473 */
2474 int nobh_truncate_page(struct address_space *mapping, loff_t from)
2476 struct inode *inode = mapping->host;
2477 unsigned blocksize = 1 << inode->i_blkbits;
2478 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2479 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2480 unsigned to;
2481 struct page *page;
2482 struct address_space_operations *a_ops = mapping->a_ops;
2483 char *kaddr;
2484 int ret = 0;
2486 if ((offset & (blocksize - 1)) == 0)
2487 goto out;
2489 ret = -ENOMEM;
2490 page = grab_cache_page(mapping, index);
2491 if (!page)
2492 goto out;
2494 to = (offset + blocksize) & ~(blocksize - 1);
2495 ret = a_ops->prepare_write(NULL, page, offset, to);
2496 if (ret == 0) {
2497 kaddr = kmap_atomic(page, KM_USER0);
2498 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2499 flush_dcache_page(page);
2500 kunmap_atomic(kaddr, KM_USER0);
2501 set_page_dirty(page);
2503 unlock_page(page);
2504 page_cache_release(page);
2505 out:
2506 return ret;
2508 EXPORT_SYMBOL(nobh_truncate_page);
2510 int block_truncate_page(struct address_space *mapping,
2511 loff_t from, get_block_t *get_block)
2513 unsigned long index = from >> PAGE_CACHE_SHIFT;
2514 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2515 unsigned blocksize, iblock, length, pos;
2516 struct inode *inode = mapping->host;
2517 struct page *page;
2518 struct buffer_head *bh;
2519 void *kaddr;
2520 int err;
2522 blocksize = 1 << inode->i_blkbits;
2523 length = offset & (blocksize - 1);
2525 /* Block boundary? Nothing to do */
2526 if (!length)
2527 return 0;
2529 length = blocksize - length;
2530 iblock = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2532 page = grab_cache_page(mapping, index);
2533 err = -ENOMEM;
2534 if (!page)
2535 goto out;
2537 if (!page_has_buffers(page))
2538 create_empty_buffers(page, blocksize, 0);
2540 /* Find the buffer that contains "offset" */
2541 bh = page_buffers(page);
2542 pos = blocksize;
2543 while (offset >= pos) {
2544 bh = bh->b_this_page;
2545 iblock++;
2546 pos += blocksize;
2549 err = 0;
2550 if (!buffer_mapped(bh)) {
2551 err = get_block(inode, iblock, bh, 0);
2552 if (err)
2553 goto unlock;
2554 /* unmapped? It's a hole - nothing to do */
2555 if (!buffer_mapped(bh))
2556 goto unlock;
2559 /* Ok, it's mapped. Make sure it's up-to-date */
2560 if (PageUptodate(page))
2561 set_buffer_uptodate(bh);
2563 if (!buffer_uptodate(bh) && !buffer_delay(bh)) {
2564 err = -EIO;
2565 ll_rw_block(READ, 1, &bh);
2566 wait_on_buffer(bh);
2567 /* Uhhuh. Read error. Complain and punt. */
2568 if (!buffer_uptodate(bh))
2569 goto unlock;
2572 kaddr = kmap_atomic(page, KM_USER0);
2573 memset(kaddr + offset, 0, length);
2574 flush_dcache_page(page);
2575 kunmap_atomic(kaddr, KM_USER0);
2577 mark_buffer_dirty(bh);
2578 err = 0;
2580 unlock:
2581 unlock_page(page);
2582 page_cache_release(page);
2583 out:
2584 return err;
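/*
 * Example (hypothetical sketch): a truncate path typically zeroes the
 * partial block at the new EOF with block_truncate_page() before
 * releasing the on-disk blocks beyond it.  myfs_truncate,
 * myfs_free_blocks_beyond and myfs_get_block are made-up names.
 */
static void myfs_truncate(struct inode *inode)
{
	block_truncate_page(inode->i_mapping, inode->i_size, myfs_get_block);
	/* ...then free the blocks past the new i_size... */
	myfs_free_blocks_beyond(inode, inode->i_size);
}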
2587 /*
2588 * The generic ->writepage function for buffer-backed address_spaces
2589 */
2590 int block_write_full_page(struct page *page, get_block_t *get_block,
2591 struct writeback_control *wbc)
2593 struct inode * const inode = page->mapping->host;
2594 loff_t i_size = i_size_read(inode);
2595 const unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
2596 unsigned offset;
2597 void *kaddr;
2599 /* Is the page fully inside i_size? */
2600 if (page->index < end_index)
2601 return __block_write_full_page(inode, page, get_block, wbc);
2603 /* Is the page fully outside i_size? (truncate in progress) */
2604 offset = i_size & (PAGE_CACHE_SIZE-1);
2605 if (page->index >= end_index+1 || !offset) {
2606 /*
2607 * The page may have dirty, unmapped buffers. For example,
2608 * they may have been added in ext3_writepage(). Make them
2609 * freeable here, so the page does not leak.
2610 */
2611 block_invalidatepage(page, 0);
2612 unlock_page(page);
2613 return 0; /* don't care */
2614 }
2616 /*
2617 * The page straddles i_size. It must be zeroed out on each and every
2618 * writepage invocation because it may be mmapped. "A file is mapped
2619 * in multiples of the page size. For a file that is not a multiple of
2620 * the page size, the remaining memory is zeroed when mapped, and
2621 * writes to that region are not written out to the file."
2622 */
2623 kaddr = kmap_atomic(page, KM_USER0);
2624 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2625 flush_dcache_page(page);
2626 kunmap_atomic(kaddr, KM_USER0);
2627 return __block_write_full_page(inode, page, get_block, wbc);
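/*
 * Example (hypothetical sketch): the usual way a filesystem exposes
 * block_write_full_page() is as its writepage address_space operation.
 * myfs_writepage and myfs_get_block are made-up names.
 */
static int myfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, myfs_get_block, wbc);
}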
2630 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2631 get_block_t *get_block)
2633 struct buffer_head tmp;
2634 struct inode *inode = mapping->host;
2635 tmp.b_state = 0;
2636 tmp.b_blocknr = 0;
2637 get_block(inode, block, &tmp, 0);
2638 return tmp.b_blocknr;
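/*
 * Example (hypothetical sketch): FIBMAP support is usually provided by
 * wrapping generic_block_bmap() in the bmap address_space operation.
 * myfs_bmap and myfs_get_block are made-up names.
 */
static sector_t myfs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, myfs_get_block);
}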
2641 static int end_bio_bh_io_sync(struct bio *bio, unsigned int bytes_done, int err)
2643 struct buffer_head *bh = bio->bi_private;
2645 if (bio->bi_size)
2646 return 1;
2648 bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2649 bio_put(bio);
2650 return 0;
2653 int submit_bh(int rw, struct buffer_head * bh)
2655 struct bio *bio;
2657 BUG_ON(!buffer_locked(bh));
2658 BUG_ON(!buffer_mapped(bh));
2659 BUG_ON(!bh->b_end_io);
2661 if ((rw == READ || rw == READA) && buffer_uptodate(bh))
2662 buffer_error();
2663 if (rw == WRITE && !buffer_uptodate(bh))
2664 buffer_error();
2665 if (rw == READ && buffer_dirty(bh))
2666 buffer_error();
2668 /* Only clear out a write error when rewriting */
2669 if (test_set_buffer_req(bh) && rw == WRITE)
2670 clear_buffer_write_io_error(bh);
2672 /*
2673 * from here on down, it's all bio -- do the initial mapping,
2674 * submit_bio -> generic_make_request may further map this bio around
2675 */
2676 bio = bio_alloc(GFP_NOIO, 1);
2678 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2679 bio->bi_bdev = bh->b_bdev;
2680 bio->bi_io_vec[0].bv_page = bh->b_page;
2681 bio->bi_io_vec[0].bv_len = bh->b_size;
2682 bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2684 bio->bi_vcnt = 1;
2685 bio->bi_idx = 0;
2686 bio->bi_size = bh->b_size;
2688 bio->bi_end_io = end_bio_bh_io_sync;
2689 bio->bi_private = bh;
2691 return submit_bio(rw, bio);
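/*
 * Example (hypothetical sketch): the minimal protocol around submit_bh()
 * for a synchronous read - the caller supplies a mapped buffer with a
 * non-zero reference count, we lock it, set b_end_io, submit and wait.
 * read_one_bh is a made-up helper; end_buffer_read_sync (defined in this
 * file) drops the extra reference on completion.
 */
static int read_one_bh(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		/* Someone else read it while we were getting the lock. */
		unlock_buffer(bh);
		return 0;
	}
	get_bh(bh);
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(READ, bh);
	wait_on_buffer(bh);
	return buffer_uptodate(bh) ? 0 : -EIO;
}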
2694 /**
2695 * ll_rw_block: low-level access to block devices (DEPRECATED)
2696 * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
2697 * @nr: number of &struct buffer_heads in the array
2698 * @bhs: array of pointers to &struct buffer_head
2699 *
2700 * ll_rw_block() takes an array of pointers to &struct buffer_heads,
2701 * and requests an I/O operation on them, either a %READ or a %WRITE.
2702 * The third %READA option is described in the documentation for
2703 * generic_make_request() which ll_rw_block() calls.
2704 *
2705 * This function drops any buffer that it cannot get a lock on (with the
2706 * BH_Lock state bit), any buffer that appears to be clean when doing a
2707 * write request, and any buffer that appears to be up-to-date when doing
2708 * a read request. Further it marks as clean buffers that are processed for
2709 * writing (the buffer cache won't assume that they are actually clean until
2710 * the buffer gets unlocked).
2711 *
2712 * ll_rw_block sets b_end_io to a simple completion handler that marks
2713 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
2714 * any waiters.
2715 *
2716 * All of the buffers must be for the same device, and must also be a
2717 * multiple of the current approved size for the device.
2718 */
2719 void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2721 int i;
2723 for (i = 0; i < nr; i++) {
2724 struct buffer_head *bh = bhs[i];
2726 if (test_set_buffer_locked(bh))
2727 continue;
2729 get_bh(bh);
2730 if (rw == WRITE) {
2731 bh->b_end_io = end_buffer_write_sync;
2732 if (test_clear_buffer_dirty(bh)) {
2733 submit_bh(WRITE, bh);
2734 continue;
2736 } else {
2737 bh->b_end_io = end_buffer_read_sync;
2738 if (!buffer_uptodate(bh)) {
2739 submit_bh(rw, bh);
2740 continue;
2743 unlock_buffer(bh);
2744 put_bh(bh);
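/*
 * Example (hypothetical sketch): the common ll_rw_block() pattern -
 * kick off reads for a batch of already-mapped buffers, then wait for
 * each one and check that it came back uptodate.  read_buffers is a
 * made-up helper; the same pattern appears in nobh_prepare_write()
 * above.
 */
static int read_buffers(struct buffer_head *bhs[], int nr)
{
	int i, err = 0;

	ll_rw_block(READ, nr, bhs);
	for (i = 0; i < nr; i++) {
		wait_on_buffer(bhs[i]);
		if (!buffer_uptodate(bhs[i]))
			err = -EIO;
	}
	return err;
}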
2748 /*
2749 * For a data-integrity writeout, we need to wait upon any in-progress I/O
2750 * and then start new I/O and then wait upon it.
2751 */
2752 void sync_dirty_buffer(struct buffer_head *bh)
2754 WARN_ON(atomic_read(&bh->b_count) < 1);
2755 lock_buffer(bh);
2756 if (test_clear_buffer_dirty(bh)) {
2757 get_bh(bh);
2758 bh->b_end_io = end_buffer_write_sync;
2759 submit_bh(WRITE, bh);
2760 wait_on_buffer(bh);
2761 } else {
2762 unlock_buffer(bh);
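/*
 * Example (hypothetical sketch): the usual data-integrity pattern for a
 * metadata buffer - dirty it, then push it out and wait with
 * sync_dirty_buffer().  myfs_sync_super and the block number are
 * made-up; sb_bread() is the standard helper for reading one block.
 */
static int myfs_sync_super(struct super_block *sb, sector_t blocknr)
{
	struct buffer_head *bh;
	int err;

	bh = sb_bread(sb, blocknr);
	if (!bh)
		return -EIO;
	/* ...modify the on-disk structure in bh->b_data here... */
	mark_buffer_dirty(bh);
	sync_dirty_buffer(bh);
	err = buffer_uptodate(bh) ? 0 : -EIO;
	brelse(bh);
	return err;
}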
2766 /*
2767 * Sanity checks for try_to_free_buffers.
2768 */
2769 static void check_ttfb_buffer(struct page *page, struct buffer_head *bh)
2771 if (!buffer_uptodate(bh) && !buffer_req(bh)) {
2772 if (PageUptodate(page) && page->mapping
2773 && buffer_mapped(bh) /* discard_buffer */
2774 && S_ISBLK(page->mapping->host->i_mode))
2776 buffer_error();
2781 /*
2782 * try_to_free_buffers() checks if all the buffers on this particular page
2783 * are unused, and releases them if so.
2784 *
2785 * Exclusion against try_to_free_buffers may be obtained by either
2786 * locking the page or by holding its mapping's private_lock.
2787 *
2788 * If the page is dirty but all the buffers are clean then we need to
2789 * be sure to mark the page clean as well. This is because the page
2790 * may be against a block device, and a later reattachment of buffers
2791 * to a dirty page will set *all* buffers dirty. Which would corrupt
2792 * filesystem data on the same device.
2793 *
2794 * The same applies to regular filesystem pages: if all the buffers are
2795 * clean then we set the page clean and proceed. To do that, we require
2796 * total exclusion from __set_page_dirty_buffers(). That is obtained with
2797 * private_lock.
2798 *
2799 * try_to_free_buffers() is non-blocking.
2800 */
2801 static inline int buffer_busy(struct buffer_head *bh)
2803 return atomic_read(&bh->b_count) |
2804 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
2807 static int
2808 drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
2810 struct buffer_head *head = page_buffers(page);
2811 struct buffer_head *bh;
2812 int was_uptodate = 1;
2814 bh = head;
2815 do {
2816 check_ttfb_buffer(page, bh);
2817 if (buffer_write_io_error(bh))
2818 set_bit(AS_EIO, &page->mapping->flags);
2819 if (buffer_busy(bh))
2820 goto failed;
2821 if (!buffer_uptodate(bh) && !buffer_req(bh))
2822 was_uptodate = 0;
2823 bh = bh->b_this_page;
2824 } while (bh != head);
2826 if (!was_uptodate && PageUptodate(page))
2827 buffer_error();
2829 do {
2830 struct buffer_head *next = bh->b_this_page;
2832 if (!list_empty(&bh->b_assoc_buffers))
2833 __remove_assoc_queue(bh);
2834 bh = next;
2835 } while (bh != head);
2836 *buffers_to_free = head;
2837 __clear_page_buffers(page);
2838 return 1;
2839 failed:
2840 return 0;
2843 int try_to_free_buffers(struct page *page)
2845 struct address_space * const mapping = page->mapping;
2846 struct buffer_head *buffers_to_free = NULL;
2847 int ret = 0;
2849 BUG_ON(!PageLocked(page));
2850 if (PageWriteback(page))
2851 return 0;
2853 if (mapping == NULL) { /* swapped-in anon page */
2854 ret = drop_buffers(page, &buffers_to_free);
2855 goto out;
2858 spin_lock(&mapping->private_lock);
2859 ret = drop_buffers(page, &buffers_to_free);
2860 if (ret && !PageSwapCache(page)) {
2861 /*
2862 * If the filesystem writes its buffers by hand (eg ext3)
2863 * then we can have clean buffers against a dirty page. We
2864 * clean the page here; otherwise later reattachment of buffers
2865 * could encounter a non-uptodate page, which is unresolvable.
2866 * This only applies in the rare case where try_to_free_buffers
2867 * succeeds but the page is not freed.
2868 */
2869 clear_page_dirty(page);
2871 spin_unlock(&mapping->private_lock);
2872 out:
2873 if (buffers_to_free) {
2874 struct buffer_head *bh = buffers_to_free;
2876 do {
2877 struct buffer_head *next = bh->b_this_page;
2878 free_buffer_head(bh);
2879 bh = next;
2880 } while (bh != buffers_to_free);
2882 return ret;
2884 EXPORT_SYMBOL(try_to_free_buffers);
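/*
 * Example (hypothetical sketch): filesystems commonly expose
 * try_to_free_buffers() through their releasepage address_space
 * operation; journalling filesystems typically run their own checks
 * first.  myfs_releasepage is a made-up name.
 */
static int myfs_releasepage(struct page *page, int gfp_mask)
{
	return try_to_free_buffers(page);
}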
2886 int block_sync_page(struct page *page)
2888 blk_run_queues();
2889 return 0;
2890 }
2892 /*
2893 * There are no bdflush tunables left. But distributions are
2894 * still running obsolete flush daemons, so we terminate them here.
2895 *
2896 * Use of bdflush() is deprecated and will be removed in a future kernel.
2897 * The `pdflush' kernel threads fully replace bdflush daemons and this call.
2898 */
2899 asmlinkage long sys_bdflush(int func, long data)
2901 static int msg_count;
2903 if (!capable(CAP_SYS_ADMIN))
2904 return -EPERM;
2906 if (msg_count < 5) {
2907 msg_count++;
2908 printk(KERN_INFO
2909 "warning: process `%s' used the obsolete bdflush"
2910 " system call\n", current->comm);
2911 printk(KERN_INFO "Fix your initscripts?\n");
2914 if (func == 1)
2915 do_exit(0);
2916 return 0;
2917 }
2919 /*
2920 * Buffer-head allocation
2921 */
2922 static kmem_cache_t *bh_cachep;
2924 /*
2925 * Once the number of bh's in the machine exceeds this level, we start
2926 * stripping them in writeback.
2927 */
2928 static int max_buffer_heads;
2930 int buffer_heads_over_limit;
2932 struct bh_accounting {
2933 int nr; /* Number of live bh's */
2934 int ratelimit; /* Limit cacheline bouncing */
2935 };
2937 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
2939 static void recalc_bh_state(void)
2941 int i;
2942 int tot = 0;
2944 if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
2945 return;
2946 __get_cpu_var(bh_accounting).ratelimit = 0;
2947 for (i = 0; i < NR_CPUS; i++) {
2948 if (cpu_online(i))
2949 tot += per_cpu(bh_accounting, i).nr;
2951 buffer_heads_over_limit = (tot > max_buffer_heads);
2954 struct buffer_head *alloc_buffer_head(int gfp_flags)
2956 struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
2957 if (ret) {
2958 preempt_disable();
2959 __get_cpu_var(bh_accounting).nr++;
2960 recalc_bh_state();
2961 preempt_enable();
2963 return ret;
2965 EXPORT_SYMBOL(alloc_buffer_head);
2967 void free_buffer_head(struct buffer_head *bh)
2969 BUG_ON(!list_empty(&bh->b_assoc_buffers));
2970 kmem_cache_free(bh_cachep, bh);
2971 preempt_disable();
2972 __get_cpu_var(bh_accounting).nr--;
2973 recalc_bh_state();
2974 preempt_enable();
2976 EXPORT_SYMBOL(free_buffer_head);
2978 static void
2979 init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long flags)
2981 if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
2982 SLAB_CTOR_CONSTRUCTOR) {
2983 struct buffer_head * bh = (struct buffer_head *)data;
2985 memset(bh, 0, sizeof(*bh));
2986 INIT_LIST_HEAD(&bh->b_assoc_buffers);
2990 static void buffer_init_cpu(int cpu)
2992 struct bh_accounting *bha = &per_cpu(bh_accounting, cpu);
2993 struct bh_lru *bhl = &per_cpu(bh_lrus, cpu);
2995 bha->nr = 0;
2996 bha->ratelimit = 0;
2997 memset(bhl, 0, sizeof(*bhl));
3000 static int __devinit buffer_cpu_notify(struct notifier_block *self,
3001 unsigned long action, void *hcpu)
3003 long cpu = (long)hcpu;
3004 switch(action) {
3005 case CPU_UP_PREPARE:
3006 buffer_init_cpu(cpu);
3007 break;
3008 default:
3009 break;
3011 return NOTIFY_OK;
3014 static struct notifier_block __devinitdata buffer_nb = {
3015 .notifier_call = buffer_cpu_notify,
3018 void __init buffer_init(void)
3020 int i;
3021 int nrpages;
3023 bh_cachep = kmem_cache_create("buffer_head",
3024 sizeof(struct buffer_head), 0,
3025 0, init_buffer_head, NULL);
3026 for (i = 0; i < ARRAY_SIZE(bh_wait_queue_heads); i++)
3027 init_waitqueue_head(&bh_wait_queue_heads[i].wqh);
3029 /*
3030 * Limit the bh occupancy to 10% of ZONE_NORMAL
3031 */
3032 nrpages = (nr_free_buffer_pages() * 10) / 100;
3033 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3034 buffer_cpu_notify(&buffer_nb, (unsigned long)CPU_UP_PREPARE,
3035 (void *)(long)smp_processor_id());
3036 register_cpu_notifier(&buffer_nb);
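/*
 * Worked example (hypothetical numbers): with 4096-byte pages and a
 * buffer_head of roughly 64 bytes (the real size varies by arch and
 * config), each page holds about 4096 / 64 = 64 buffer_heads.  If
 * nr_free_buffer_pages() reports, say, 200000 pages, the 10% cap is
 * nrpages = 20000 and max_buffer_heads = 20000 * 64 = 1280000 before
 * buffer_heads_over_limit trips and writeback starts stripping them.
 */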
3039 EXPORT_SYMBOL(__bforget);
3040 EXPORT_SYMBOL(__brelse);
3041 EXPORT_SYMBOL(__wait_on_buffer);
3042 EXPORT_SYMBOL(block_commit_write);
3043 EXPORT_SYMBOL(block_prepare_write);
3044 EXPORT_SYMBOL(block_read_full_page);
3045 EXPORT_SYMBOL(block_sync_page);
3046 EXPORT_SYMBOL(block_truncate_page);
3047 EXPORT_SYMBOL(block_write_full_page);
3048 EXPORT_SYMBOL(buffer_insert_list);
3049 EXPORT_SYMBOL(cont_prepare_write);
3050 EXPORT_SYMBOL(end_buffer_async_write);
3051 EXPORT_SYMBOL(end_buffer_read_sync);
3052 EXPORT_SYMBOL(end_buffer_write_sync);
3053 EXPORT_SYMBOL(file_fsync);
3054 EXPORT_SYMBOL(fsync_bdev);
3055 EXPORT_SYMBOL(fsync_buffers_list);
3056 EXPORT_SYMBOL(generic_block_bmap);
3057 EXPORT_SYMBOL(generic_commit_write);
3058 EXPORT_SYMBOL(generic_cont_expand);
3059 EXPORT_SYMBOL(init_buffer);
3060 EXPORT_SYMBOL(invalidate_bdev);
3061 EXPORT_SYMBOL(ll_rw_block);
3062 EXPORT_SYMBOL(mark_buffer_dirty);
3063 EXPORT_SYMBOL(submit_bh);
3064 EXPORT_SYMBOL(sync_dirty_buffer);
3065 EXPORT_SYMBOL(unlock_buffer);