/*
 * Copyright (C) 1991, 1992, 2002  Linus Torvalds
 *
 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 *
 * Removed a lot of unnecessary code and simplified things now that
 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 *
 * Speed up hash, lru, and free list operations.  Use gfp() for allocating
 * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
 *
 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
 *
 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/bio.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <asm/bitops.h>
static void invalidate_bh_lrus(void);

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)

/*
 * Hashed waitqueue_head's for wait_on_buffer()
 */
#define BH_WAIT_TABLE_ORDER	7
static struct bh_wait_queue_head {
	wait_queue_head_t wqh;
} ____cacheline_aligned_in_smp bh_wait_queue_heads[1<<BH_WAIT_TABLE_ORDER];
/*
 * Debug/devel support stuff
 */

void __buffer_error(char *file, int line)
	printk("buffer layer error at %s:%d\n", file, line);
#ifndef CONFIG_KALLSYMS
	printk("Pass this trace through ksymoops for reporting\n");
EXPORT_SYMBOL(__buffer_error);
init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
	bh->b_end_io = handler;
	bh->b_private = private;
/*
 * Return the address of the waitqueue_head to be used for this
 */
wait_queue_head_t *bh_waitq_head(struct buffer_head *bh)
	return &bh_wait_queue_heads[hash_ptr(bh, BH_WAIT_TABLE_ORDER)].wqh;
EXPORT_SYMBOL(bh_waitq_head);
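/*
 * Illustrative sketch (not part of this file): the property the hashed table
 * above relies on is that the wait side and the wake side hash the same
 * buffer_head to the same queue head.  "example_same_queue" is a made-up
 * name for demonstration only.
 */
static void example_same_queue(struct buffer_head *bh)
{
	wait_queue_head_t *waiter = bh_waitq_head(bh);	/* what __wait_on_buffer() uses */
	wait_queue_head_t *waker  = bh_waitq_head(bh);	/* what wake_up_buffer() uses */

	BUG_ON(waiter != waker);	/* same bh, same hash bucket */
}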
void wake_up_buffer(struct buffer_head *bh)
	wait_queue_head_t *wq = bh_waitq_head(bh);

	if (waitqueue_active(wq))
EXPORT_SYMBOL(wake_up_buffer);
void unlock_buffer(struct buffer_head *bh)
	/*
	 * unlock_buffer against a zero-count bh is a bug, if the page
	 * is not locked.  Because then nothing protects the buffer's
	 * waitqueue, which is used here. (Well.  Other locked buffers
	 * against the page will pin it.  But complain anyway).
	 */
	if (atomic_read(&bh->b_count) == 0 &&
			!PageLocked(bh->b_page) &&
			!PageWriteback(bh->b_page))

	clear_buffer_locked(bh);
	smp_mb__after_clear_bit();
/*
 * Block until a buffer comes unlocked.  This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
 */
void __wait_on_buffer(struct buffer_head * bh)
	wait_queue_head_t *wqh = bh_waitq_head(bh);

	if (atomic_read(&bh->b_count) == 0 &&
			(!bh->b_page || !PageLocked(bh->b_page)))

		prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
		if (buffer_locked(bh)) {
	} while (buffer_locked(bh));
	finish_wait(wqh, &wait);
__set_page_buffers(struct page *page, struct buffer_head *head)
	if (page_has_buffers(page))
	page_cache_get(page);
	SetPagePrivate(page);
	page->private = (unsigned long)head;

__clear_page_buffers(struct page *page)
	ClearPagePrivate(page);
	page_cache_release(page);
static void buffer_io_error(struct buffer_head *bh)
	char b[BDEVNAME_SIZE];

	printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
			bdevname(bh->b_bdev, b),
			(unsigned long long)bh->b_blocknr);
/*
 * Default synchronous end-of-IO handler..  Just mark it up-to-date and
 * unlock the buffer.  This is what ll_rw_block uses too.
 */
void end_buffer_io_sync(struct buffer_head *bh, int uptodate)
		set_buffer_uptodate(bh);
		/*
		 * This happens, due to failed READA attempts.
		 * buffer_io_error(bh);
		 */
		clear_buffer_uptodate(bh);
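/*
 * Illustrative sketch (not part of this file): the handler above is the
 * building block for synchronous reads - submit the I/O, wait for the lock
 * bit to clear, then check uptodate (the same pattern __bread_slow() uses
 * further down).  Assumes the caller hands in a locked, mapped bh.
 * "example_read_bh_sync" is a made-up name.
 */
static int example_read_bh_sync(struct buffer_head *bh)
{
	get_bh(bh);				/* hold our own reference across the I/O */
	bh->b_end_io = end_buffer_io_sync;	/* unlocks the bh when I/O completes */
	submit_bh(READ, bh);
	wait_on_buffer(bh);			/* __wait_on_buffer() if still locked */
	return buffer_uptodate(bh) ? 0 : -EIO;
}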
/*
 * Write out and wait upon all the dirty data associated with a block
 * device via its mapping.  Does not take the superblock lock.
 */
int sync_blockdev(struct block_device *bdev)
	ret = filemap_fdatawrite(bdev->bd_inode->i_mapping);
	err = filemap_fdatawait(bdev->bd_inode->i_mapping);
EXPORT_SYMBOL(sync_blockdev);
/*
 * Write out and wait upon all dirty data associated with this
 * superblock.  Filesystem data as well as the underlying block
 * device.  Takes the superblock lock.
 */
int fsync_super(struct super_block *sb)
	sync_inodes_sb(sb, 0);
	if (sb->s_dirt && sb->s_op->write_super)
		sb->s_op->write_super(sb);
	if (sb->s_op->sync_fs)
		sb->s_op->sync_fs(sb, 1);
	sync_blockdev(sb->s_bdev);
	sync_inodes_sb(sb, 1);

	return sync_blockdev(sb->s_bdev);
/*
 * Write out and wait upon all dirty data associated with this
 * device.  Filesystem data as well as the underlying block
 * device.  Takes the superblock lock.
 */
int fsync_bdev(struct block_device *bdev)
	struct super_block *sb = get_super(bdev);
		int res = fsync_super(sb);
	return sync_blockdev(bdev);
/*
 * sync everything.  Start out by waking pdflush, because that writes back
 * all queues in parallel.
 */
static void do_sync(unsigned long wait)
	sync_inodes(0);		/* All mappings, inodes and their blockdevs */
	sync_supers();		/* Write the superblocks */
	sync_filesystems(0);	/* Start syncing the filesystems */
	sync_filesystems(wait);	/* Waitingly sync the filesystems */
	sync_inodes(wait);	/* Mappings, inodes and blockdevs, again. */
		printk("Emergency Sync complete\n");

asmlinkage long sys_sync(void)

void emergency_sync(void)
	pdflush_operation(do_sync, 0);
/*
 * Generic function to fsync a file.
 *
 * filp may be NULL if called via the msync of a vma.
 */
int file_fsync(struct file *filp, struct dentry *dentry, int datasync)
	struct inode * inode = dentry->d_inode;
	struct super_block * sb;

	/* sync the inode to buffers */
	write_inode_now(inode, 0);

	/* sync the superblock to buffers */
	if (sb->s_op->write_super)
		sb->s_op->write_super(sb);

	/* .. finally sync the buffers to disk */
	ret = sync_blockdev(sb->s_bdev);
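/*
 * Illustrative sketch (not part of this file): a simple filesystem can point
 * its file_operations at the generic helper above instead of writing its own
 * fsync method.  "examplefs" is a made-up name; the other methods shown are
 * just the usual generic helpers.
 */
static struct file_operations examplefs_file_ops = {
	.read	= generic_file_read,
	.write	= generic_file_write,
	.mmap	= generic_file_mmap,
	.fsync	= file_fsync,	/* syncs the inode, the superblock, then the blockdev */
};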
asmlinkage long sys_fsync(unsigned int fd)
	struct dentry * dentry;
	struct inode * inode;

	dentry = file->f_dentry;
	inode = dentry->d_inode;

	if (!file->f_op || !file->f_op->fsync) {
		/* Why?  We can still call filemap_fdatawrite */

	/* We need to protect against concurrent writers.. */
	ret = filemap_fdatawrite(inode->i_mapping);
	err = file->f_op->fsync(file, dentry, 0);
	err = filemap_fdatawait(inode->i_mapping);
asmlinkage long sys_fdatasync(unsigned int fd)
	struct dentry * dentry;
	struct inode * inode;

	dentry = file->f_dentry;
	inode = dentry->d_inode;

	if (!file->f_op || !file->f_op->fsync)
	ret = filemap_fdatawrite(inode->i_mapping);
	err = file->f_op->fsync(file, dentry, 1);
	err = filemap_fdatawait(inode->i_mapping);
/*
 * Various filesystems appear to want __find_get_block to be non-blocking.
 * But it's the page lock which protects the buffers.  To get around this,
 * we get exclusion from try_to_free_buffers with the blockdev mapping's
 * private_lock.
 *
 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
 * may be quite high.  This code could TryLock the page, and if that
 * succeeds, there is no need to take private_lock. (But if
 * private_lock is contended then so is mapping->page_lock).
 */
static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block, int unused)
	struct inode *bd_inode = bdev->bd_inode;
	struct address_space *bd_mapping = bd_inode->i_mapping;
	struct buffer_head *ret = NULL;
	struct buffer_head *bh;
	struct buffer_head *head;

	index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
	page = find_get_page(bd_mapping, index);

	spin_lock(&bd_mapping->private_lock);
	if (!page_has_buffers(page))
	head = page_buffers(page);
		if (bh->b_blocknr == block) {
		bh = bh->b_this_page;
	} while (bh != head);

	spin_unlock(&bd_mapping->private_lock);
	page_cache_release(page);
/* If invalidate_buffers() will trash dirty buffers, it means some kind
   of fs corruption is going on.  Trashing dirty data always implies losing
   information that was supposed to be just stored on the physical layer

   Thus invalidate_buffers in general usage is not allowed to trash
   dirty buffers.  For example ioctl(BLKFLSBUF) expects dirty data to
   be preserved.  These buffers are simply skipped.

   We also skip buffers which are still in use.  For example this can
   happen if a userspace program is reading the block device.

   NOTE: In the case where the user removed a removable-media-disk even if
   there's still dirty data not synced on disk (due to a bug in the device driver
   or due to an error of the user), by not destroying the dirty buffers we could
   generate corruption also on the next media inserted, thus a parameter is
   necessary to handle this case in the safest way possible (trying
   to not corrupt also the new disk inserted with the data belonging to
   the old now corrupted disk).  Also for the ramdisk the natural thing
   to do in order to release the ramdisk memory is to destroy dirty buffers.

   These are two special cases.  Normal usage implies the device driver
   to issue a sync on the device (without waiting I/O completion) and
   then an invalidate_buffers call that doesn't trash dirty buffers.

   For handling cache coherency with the blkdev pagecache the 'update' case
   has been introduced.  It is needed to re-read from disk any pinned
   buffer.  NOTE: re-reading from disk is destructive so we can do it only
   when we assume nobody is changing the buffercache under our I/O and when
   we think the disk contains more recent information than the buffercache.
   The update == 1 pass marks the buffers we need to update, the update == 2
   pass does the actual I/O. */
void invalidate_bdev(struct block_device *bdev, int destroy_dirty_buffers)
	invalidate_bh_lrus();
	/*
	 * FIXME: what about destroy_dirty_buffers?
	 * We really want to use invalidate_inode_pages2() for
	 * that, but not until that's cleaned up.
	 */
	invalidate_inode_pages(bdev->bd_inode->i_mapping);
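/*
 * Illustrative sketch (not part of this file): the "normal usage" described
 * in the comment above - sync what can still be written, then drop the stale
 * cache, dirty data included, e.g. on a media change.  "example_revalidate"
 * is a made-up helper name.
 */
static void example_revalidate(struct block_device *bdev)
{
	sync_blockdev(bdev);		/* push out anything we still can */
	invalidate_bdev(bdev, 1);	/* then discard cached data, even if dirty */
}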
/*
 * Kick pdflush then try to free up some ZONE_NORMAL memory.
 */
static void free_more_memory(void)
	wakeup_bdflush(1024);

	for_each_pgdat(pgdat) {
		zone = pgdat->node_zonelists[GFP_NOFS&GFP_ZONEMASK].zones[0];
			try_to_free_pages(zone, GFP_NOFS, 0);
/*
 * I/O completion handler for block_read_full_page() - pages
 * which come unlocked at the end of I/O.
 */
static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
	static spinlock_t page_uptodate_lock = SPIN_LOCK_UNLOCKED;
	struct buffer_head *tmp;
	int page_uptodate = 1;

	BUG_ON(!buffer_async_read(bh));
		set_buffer_uptodate(bh);
		clear_buffer_uptodate(bh);

	/*
	 * Be _very_ careful from here on. Bad things can happen if
	 * two buffer heads end IO at almost the same time and both
	 * decide that the page is now completely done.
	 */
	spin_lock_irqsave(&page_uptodate_lock, flags);
	clear_buffer_async_read(bh);
		if (!buffer_uptodate(tmp))
		if (buffer_async_read(tmp)) {
			BUG_ON(!buffer_locked(tmp));
		tmp = tmp->b_this_page;
	spin_unlock_irqrestore(&page_uptodate_lock, flags);

	/*
	 * If none of the buffers had errors and they are all
	 * uptodate then we can set the page uptodate.
	 */
	if (page_uptodate && !PageError(page))
		SetPageUptodate(page);

	spin_unlock_irqrestore(&page_uptodate_lock, flags);
/*
 * Completion handler for block_write_full_page() - pages which are unlocked
 * during I/O, and which have PageWriteback cleared upon I/O completion.
 */
void end_buffer_async_write(struct buffer_head *bh, int uptodate)
	static spinlock_t page_uptodate_lock = SPIN_LOCK_UNLOCKED;
	struct buffer_head *tmp;

	BUG_ON(!buffer_async_write(bh));
		set_buffer_uptodate(bh);
		clear_buffer_uptodate(bh);

	spin_lock_irqsave(&page_uptodate_lock, flags);
	clear_buffer_async_write(bh);
	tmp = bh->b_this_page;
		if (buffer_async_write(tmp)) {
			BUG_ON(!buffer_locked(tmp));
		tmp = tmp->b_this_page;
	spin_unlock_irqrestore(&page_uptodate_lock, flags);
	end_page_writeback(page);

	spin_unlock_irqrestore(&page_uptodate_lock, flags);
/*
 * If a page's buffers are under async readin (end_buffer_async_read
 * completion) then there is a possibility that another thread of
 * control could lock one of the buffers after it has completed
 * but while some of the other buffers have not completed.  This
 * locked buffer would confuse end_buffer_async_read() into not unlocking
 * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
 * that this buffer is not under async I/O.
 *
 * The page comes unlocked when it has no locked buffer_async buffers
 * left.
 *
 * PageLocked prevents anyone starting new async I/O reads any of
 * the buffers.
 *
 * PageWriteback is used to prevent simultaneous writeout of the same
 * page.
 *
 * PageLocked prevents anyone from starting writeback of a page which is
 * under read I/O (PageWriteback is only ever set against a locked page).
 */
void mark_buffer_async_read(struct buffer_head *bh)
	bh->b_end_io = end_buffer_async_read;
	set_buffer_async_read(bh);
EXPORT_SYMBOL(mark_buffer_async_read);

void mark_buffer_async_write(struct buffer_head *bh)
	bh->b_end_io = end_buffer_async_write;
	set_buffer_async_write(bh);
EXPORT_SYMBOL(mark_buffer_async_write);
/*
 * fs/buffer.c contains helper functions for buffer-backed address space's
 * fsync functions.  A common requirement for buffer-based filesystems is
 * that certain data from the backing blockdev needs to be written out for
 * a successful fsync().  For example, ext2 indirect blocks need to be
 * written back and waited upon before fsync() returns.
 *
 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
 * management of a list of dependent buffers at ->i_mapping->private_list.
 *
 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 * from their controlling inode's queue when they are being freed.  But
 * try_to_free_buffers() will be operating against the *blockdev* mapping
 * at the time, not against the S_ISREG file which depends on those buffers.
 * So the locking for private_list is via the private_lock in the address_space
 * which backs the buffers.  Which is different from the address_space
 * against which the buffers are listed.  So for a particular address_space,
 * mapping->private_lock does *not* protect mapping->private_list!  In fact,
 * mapping->private_list will always be protected by the backing blockdev's
 * ->private_lock.
 *
 * Which introduces a requirement: all buffers on an address_space's
 * ->private_list must be from the same address_space: the blockdev's.
 *
 * address_spaces which do not place buffers at ->private_list via these
 * utility functions are free to use private_lock and private_list for
 * whatever they want.  The only requirement is that list_empty(private_list)
 * be true at clear_inode() time.
 *
 * FIXME: clear_inode should not call invalidate_inode_buffers().  The
 * filesystems should do that.  invalidate_inode_buffers() should just go
 * BUG_ON(!list_empty).
 *
 * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
 * take an address_space, not an inode.  And it should be called
 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
 * dirtied.
 *
 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
 * list if it is already on a list.  Because if the buffer is on a list,
 * it *must* already be on the right one.  If not, the filesystem is being
 * silly.  This will save a ton of locking.  But first we have to ensure
 * that buffers are taken *off* the old inode's list when they are freed
 * (presumably in truncate).  That requires careful auditing of all
 * filesystems (do it inside bforget()).  It could also be done by bringing
 * b_inode back.
 */
void buffer_insert_list(spinlock_t *lock,
		struct buffer_head *bh, struct list_head *list)
	list_move_tail(&bh->b_assoc_buffers, list);

/*
 * The buffer's backing address_space's private_lock must be held
 */
static inline void __remove_assoc_queue(struct buffer_head *bh)
	list_del_init(&bh->b_assoc_buffers);

int inode_has_buffers(struct inode *inode)
	return !list_empty(&inode->i_data.private_list);
/*
 * osync is designed to support O_SYNC io.  It waits synchronously for
 * all already-submitted IO to complete, but does not queue any new
 * writes to the disk.
 *
 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
 * you dirty the buffers, and then use osync_inode_buffers to wait for
 * completion.  Any other dirty buffers which are not yet queued for
 * write will not be flushed to disk by the osync.
 */
static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
	struct buffer_head *bh;

	list_for_each_prev(p, list) {
		if (buffer_locked(bh)) {
			if (!buffer_uptodate(bh))
/*
 * sync_mapping_buffers - write out and wait upon a mapping's "associated"
 *                        buffers
 * @buffer_mapping - the mapping which backs the buffers' data
 * @mapping - the mapping which wants those buffers written
 *
 * Starts I/O against the buffers at mapping->private_list, and waits upon
 * that I/O.
 *
 * Basically, this is a convenience function for fsync().  @buffer_mapping is
 * the blockdev which "owns" the buffers and @mapping is a file or directory
 * which needs those buffers to be written for a successful fsync().
 */
int sync_mapping_buffers(struct address_space *mapping)
	struct address_space *buffer_mapping = mapping->assoc_mapping;

	if (buffer_mapping == NULL || list_empty(&mapping->private_list))
	return fsync_buffers_list(&buffer_mapping->private_lock,
					&mapping->private_list);
EXPORT_SYMBOL(sync_mapping_buffers);
/*
 * Called when we've recently written block `bblock', and it is known that
 * `bblock' was for a buffer_boundary() buffer.  This means that the block at
 * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
 * dirty, schedule it for IO.  So that indirects merge nicely with their data.
 */
void write_boundary_block(struct block_device *bdev,
			sector_t bblock, unsigned blocksize)
	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);

	if (buffer_dirty(bh))
		ll_rw_block(WRITE, 1, &bh);
void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
	struct address_space *mapping = inode->i_mapping;
	struct address_space *buffer_mapping = bh->b_page->mapping;

	mark_buffer_dirty(bh);
	if (!mapping->assoc_mapping) {
		mapping->assoc_mapping = buffer_mapping;
	if (mapping->assoc_mapping != buffer_mapping)

	if (list_empty(&bh->b_assoc_buffers))
		buffer_insert_list(&buffer_mapping->private_lock,
				bh, &mapping->private_list);
EXPORT_SYMBOL(mark_buffer_dirty_inode);
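/*
 * Illustrative sketch (not part of this file): the intended pairing is that a
 * filesystem tags metadata buffers against the inode that needs them for
 * fsync(), and its fsync method then writes out and waits on that list via
 * sync_mapping_buffers().  The "examplefs" names are made up; sb_getblk() is
 * the usual per-superblock wrapper around __getblk().
 */
static void examplefs_dirty_indirect(struct inode *inode, sector_t block)
{
	struct buffer_head *bh = sb_getblk(inode->i_sb, block);

	/* ... modify the indirect block's contents here ... */
	mark_buffer_dirty_inode(bh, inode);	/* queue on inode->i_mapping->private_list */
	brelse(bh);
}

static int examplefs_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	/* writes out and waits upon the buffers queued above */
	return sync_mapping_buffers(dentry->d_inode->i_mapping);
}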
/*
 * Add a page to the dirty page list.
 *
 * It is a sad fact of life that this function is called from several places
 * deeply under spinlocking.  It may not sleep.
 *
 * If the page has buffers, the uptodate buffers are set dirty, to preserve
 * dirty-state coherency between the page and the buffers.  If the page does
 * not have buffers then when they are later attached they will all be set
 * dirty.
 *
 * The buffers are dirtied before the page is dirtied.  There's a small race
 * window in which a writepage caller may see the page cleanness but not the
 * buffer dirtiness.  That's fine.  If this code were to set the page dirty
 * before the buffers, a concurrent writepage caller could clear the page dirty
 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
 * page on the dirty page list.
 *
 * There is also a small window where the page is dirty, and not on dirty_pages.
 * Also a possibility that by the time the page is added to dirty_pages, it has
 * been set clean.  The page lists are somewhat approximate in this regard.
 * It's better to have clean pages accidentally attached to dirty_pages than to
 * leave dirty pages attached to clean_pages.
 *
 * We use private_lock to lock against try_to_free_buffers while using the
 * page's buffer list.  Also use this to protect against clean buffers being
 * added to the page after it was set dirty.
 *
 * FIXME: may need to call ->reservepage here as well.  That's rather up to the
 * address_space though.
 *
 * For now, we treat swapper_space specially.  It doesn't use the normal
 */
int __set_page_dirty_buffers(struct page *page)
	struct address_space * const mapping = page->mapping;

	if (mapping == NULL) {

	spin_lock(&mapping->private_lock);
	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

			if (buffer_uptodate(bh))
				set_buffer_dirty(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	spin_unlock(&mapping->private_lock);

	if (!TestSetPageDirty(page)) {
		spin_lock(&mapping->page_lock);
		if (page->mapping) {	/* Race with truncate? */
			if (!mapping->backing_dev_info->memory_backed)
				inc_page_state(nr_dirty);
			list_del(&page->list);
			list_add(&page->list, &mapping->dirty_pages);
		spin_unlock(&mapping->page_lock);
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
EXPORT_SYMBOL(__set_page_dirty_buffers);
/*
 * Write out and wait upon a list of buffers.
 *
 * We have conflicting pressures: we want to make sure that all
 * initially dirty buffers get waited on, but that any subsequently
 * dirtied buffers don't.  After all, we don't want fsync to last
 * forever if somebody is actively writing to the file.
 *
 * Do this in two main stages: first we copy dirty buffers to a
 * temporary inode list, queueing the writes as we go.  Then we clean
 * up, waiting for those writes to complete.
 *
 * During this second stage, any subsequent updates to the file may end
 * up refiling the buffer on the original inode's dirty list again, so
 * there is a chance we will end up with a buffer queued for write but
 * not yet completed on that list.  So, as a final cleanup we go through
 * the osync code to catch these locked, dirty buffers without requeuing
 * any newly dirty buffers for write.
 */
int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
	struct buffer_head *bh;
	struct list_head tmp;

	INIT_LIST_HEAD(&tmp);

	while (!list_empty(list)) {
		bh = BH_ENTRY(list->next);
		list_del_init(&bh->b_assoc_buffers);
		if (buffer_dirty(bh) || buffer_locked(bh)) {
			list_add(&bh->b_assoc_buffers, &tmp);
			if (buffer_dirty(bh)) {
				/*
				 * Ensure any pending I/O completes so that
				 * ll_rw_block() actually writes the current
				 * contents - it is a noop if I/O is still in
				 * flight on potentially older contents.
				 */
				ll_rw_block(WRITE, 1, &bh);

	while (!list_empty(&tmp)) {
		bh = BH_ENTRY(tmp.prev);
		__remove_assoc_queue(bh);
		if (!buffer_uptodate(bh))

	err2 = osync_buffers_list(lock, list);
/*
 * Invalidate any and all dirty buffers on a given inode.  We are
 * probably unmounting the fs, but that doesn't mean we have already
 * done a sync().  Just drop the buffers from the inode list.
 *
 * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
 * assumes that all the buffers are against the blockdev.  Not true
 * for reiserfs.
 */
void invalidate_inode_buffers(struct inode *inode)
	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->assoc_mapping;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list))
			__remove_assoc_queue(BH_ENTRY(list->next));
		spin_unlock(&buffer_mapping->private_lock);
/*
 * Remove any clean buffers from the inode's buffer list.  This is called
 * when we're trying to free the inode itself.  Those buffers can pin it.
 *
 * Returns true if all buffers were removed.
 */
int remove_inode_buffers(struct inode *inode)
	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->assoc_mapping;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list)) {
			struct buffer_head *bh = BH_ENTRY(list->next);
			if (buffer_dirty(bh)) {
			__remove_assoc_queue(bh);
		spin_unlock(&buffer_mapping->private_lock);
/*
 * Create the appropriate buffers when given a page for data area and
 * the size of each buffer.. Use the bh->b_this_page linked list to
 * follow the buffers created.  Return NULL if unable to create more
 * buffers.
 *
 * The retry flag is used to differentiate async IO (paging, swapping)
 * which may not fail from ordinary buffer allocations.
 */
static struct buffer_head *
create_buffers(struct page * page, unsigned long size, int retry)
	struct buffer_head *bh, *head;

	while ((offset -= size) >= 0) {
		bh = alloc_buffer_head(GFP_NOFS);

		bh->b_this_page = head;
		atomic_set(&bh->b_count, 0);

		/* Link the buffer to its page */
		set_bh_page(bh, page, offset);

		bh->b_end_io = NULL;
	/*
	 * In case anything failed, we just free everything we got.
	 */
		head = head->b_this_page;
		free_buffer_head(bh);

	/*
	 * Return failure for non-async IO requests.  Async IO requests
	 * are not allowed to fail, so we have to wait until buffer heads
	 * become available.  But we don't want tasks sleeping with
	 * partially complete buffers, so all were released above.
	 */

	/* We're _really_ low on memory. Now we just
	 * wait for old buffer heads to become free due to
	 * finishing IO.  Since this is an async request and
	 * the reserve list is empty, we're sure there are
	 * async buffer heads in use.
	 */
link_dev_buffers(struct page *page, struct buffer_head *head)
	struct buffer_head *bh, *tail;

		bh = bh->b_this_page;
	tail->b_this_page = head;
	__set_page_buffers(page, head);
/*
 * Initialise the state of a blockdev page's buffers.
 */
init_page_buffers(struct page *page, struct block_device *bdev,
			int block, int size)
	struct buffer_head *head = page_buffers(page);
	struct buffer_head *bh = head;
	unsigned int b_state;

	b_state = 1 << BH_Mapped;
	if (PageUptodate(page))
		b_state |= 1 << BH_Uptodate;

		if (!(bh->b_state & (1 << BH_Mapped))) {
			init_buffer(bh, NULL, NULL);
			bh->b_blocknr = block;
			bh->b_state = b_state;
		bh = bh->b_this_page;
	} while (bh != head);
/*
 * Create the page-cache page that contains the requested block.
 *
 * This is used purely for blockdev mappings.
 */
static struct page *
grow_dev_page(struct block_device *bdev, unsigned long block,
		unsigned long index, int size)
	struct inode *inode = bdev->bd_inode;
	struct buffer_head *bh;

	page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
	if (!PageLocked(page))
	if (page_has_buffers(page)) {
		bh = page_buffers(page);
		if (bh->b_size == size)
		if (!try_to_free_buffers(page))

	/*
	 * Allocate some buffers for this page
	 */
	bh = create_buffers(page, size, 0);

	/*
	 * Link the page to the buffers and initialise them.  Take the
	 * lock to be atomic wrt __find_get_block(), which does not
	 * run under the page lock.
	 */
	spin_lock(&inode->i_mapping->private_lock);
	link_dev_buffers(page, bh);
	init_page_buffers(page, bdev, block, size);
	spin_unlock(&inode->i_mapping->private_lock);

	page_cache_release(page);
/*
 * Create buffers for the specified block device block's page.  If
 * that page was dirty, the buffers are set dirty also.
 *
 * Except that's a bug.  Attaching dirty buffers to a dirty
 * blockdev's page can result in filesystem corruption, because
 * some of those buffers may be aliases of filesystem data.
 * grow_dev_page() will go BUG() if this happens.
 */
grow_buffers(struct block_device *bdev, unsigned long block, int size)
	unsigned long index;

	/* Size must be multiple of hard sectorsize */
	if (size & (bdev_hardsect_size(bdev)-1))
	if (size < 512 || size > PAGE_SIZE)
	} while ((size << sizebits) < PAGE_SIZE);

	index = block >> sizebits;
	block = index << sizebits;

	/* Create a page with the proper size buffers.. */
	page = grow_dev_page(bdev, block, index, size);
	page_cache_release(page);

struct buffer_head *
__getblk_slow(struct block_device *bdev, sector_t block, int size)
	struct buffer_head * bh;

	bh = __find_get_block(bdev, block, size);
	if (!grow_buffers(bdev, block, size))
/*
 * The relationship between dirty buffers and dirty pages:
 *
 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
 * the page appears on its address_space.dirty_pages list.
 *
 * At all times, the dirtiness of the buffers represents the dirtiness of
 * subsections of the page.  If the page has buffers, the page dirty bit is
 * merely a hint about the true dirty state.
 *
 * When a page is set dirty in its entirety, all its buffers are marked dirty
 * (if the page has buffers).
 *
 * When a buffer is marked dirty, its page is dirtied, but the page's other
 * buffers are not.
 *
 * Also.  When blockdev buffers are explicitly read with bread(), they
 * individually become uptodate.  But their backing page remains not
 * uptodate - even if all of its buffers are uptodate.  A subsequent
 * block_read_full_page() against that page will discover all the uptodate
 * buffers, will set the page uptodate and will perform no I/O.
 */

/*
 * mark_buffer_dirty - mark a buffer_head as needing writeout
 *
 * mark_buffer_dirty() will set the dirty bit against the buffer,
 * then set its backing page dirty, then attach the page to its
 * address_space's dirty_pages list and then attach the address_space's
 * inode to its superblock's dirty inode list.
 *
 * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
 * mapping->page_lock and the global inode_lock.
 */
void mark_buffer_dirty(struct buffer_head *bh)
	if (!buffer_uptodate(bh))
	if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
		__set_page_dirty_nobuffers(bh->b_page);
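/*
 * Illustrative sketch (not part of this file): the usual "read, modify, mark
 * dirty, release" cycle for a metadata block.  Writeback happens later via
 * the dirty-page lists described above.  "examplefs_update_block" is a
 * made-up name; sb_bread() is the per-superblock wrapper around __bread().
 */
static int examplefs_update_block(struct super_block *sb, sector_t block, int byte, u8 val)
{
	struct buffer_head *bh = sb_bread(sb, block);	/* read it uptodate */

	if (!bh)
		return -EIO;
	bh->b_data[byte] = val;		/* modify the in-memory copy */
	mark_buffer_dirty(bh);		/* dirty the bh, its page, and the inode */
	brelse(bh);			/* drop our reference; writeback is deferred */
	return 0;
}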
/*
 * Decrement a buffer_head's reference count.  If all buffers against a page
 * have zero reference count, are clean and unlocked, and if the page is clean
 * and unlocked then try_to_free_buffers() may strip the buffers from the page
 * in preparation for freeing it (sometimes, rarely, buffers are removed from
 * a page but it ends up not being freed, and buffers may later be reattached).
 */
void __brelse(struct buffer_head * buf)
	if (atomic_read(&buf->b_count)) {

	printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n");
	buffer_error();		/* For the stack backtrace */

/*
 * bforget() is like brelse(), except it discards any
 * potentially dirty data.
 */
void __bforget(struct buffer_head *bh)
	clear_buffer_dirty(bh);
	if (!list_empty(&bh->b_assoc_buffers)) {
		struct address_space *buffer_mapping = bh->b_page->mapping;

		spin_lock(&buffer_mapping->private_lock);
		list_del_init(&bh->b_assoc_buffers);
		spin_unlock(&buffer_mapping->private_lock);
static struct buffer_head *__bread_slow(struct buffer_head *bh)
	if (buffer_uptodate(bh)) {
		if (buffer_dirty(bh))
		bh->b_end_io = end_buffer_io_sync;
		submit_bh(READ, bh);
		if (buffer_uptodate(bh))
/*
 * Per-cpu buffer LRU implementation.  To reduce the cost of __find_get_block().
 * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
 * refcount elevated by one when they're in an LRU.  A buffer can only appear
 * once in a particular CPU's LRU.  A single buffer can be present in multiple
 * CPU's LRUs at the same time.
 *
 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
 * sb_find_get_block().
 *
 * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
 * a local interrupt disable for that.
 */

#define BH_LRU_SIZE	8

struct bh_lru {
	struct buffer_head *bhs[BH_LRU_SIZE];
};

static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{0}};

#ifdef CONFIG_SMP
#define bh_lru_lock()	local_irq_disable()
#define bh_lru_unlock()	local_irq_enable()
#else
#define bh_lru_lock()	preempt_disable()
#define bh_lru_unlock()	preempt_enable()
#endif

static inline void check_irqs_on(void)
#ifdef irqs_disabled
	BUG_ON(irqs_disabled());
/*
 * The LRU management algorithm is dopey-but-simple.  Sorry.
 */
static void bh_lru_install(struct buffer_head *bh)
	struct buffer_head *evictee = NULL;

	lru = &__get_cpu_var(bh_lrus);
	if (lru->bhs[0] != bh) {
		struct buffer_head *bhs[BH_LRU_SIZE];

		for (in = 0; in < BH_LRU_SIZE; in++) {
			struct buffer_head *bh2 = lru->bhs[in];

				if (out >= BH_LRU_SIZE) {
					BUG_ON(evictee != NULL);
		while (out < BH_LRU_SIZE)
		memcpy(lru->bhs, bhs, sizeof(bhs));
/*
 * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
 */
static inline struct buffer_head *
lookup_bh_lru(struct block_device *bdev, sector_t block, int size)
	struct buffer_head *ret = NULL;

	lru = &__get_cpu_var(bh_lrus);
	for (i = 0; i < BH_LRU_SIZE; i++) {
		struct buffer_head *bh = lru->bhs[i];

		if (bh && bh->b_bdev == bdev &&
				bh->b_blocknr == block && bh->b_size == size) {
				lru->bhs[i] = lru->bhs[i - 1];
/*
 * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
 * it in the LRU and mark it as accessed.  If it is not present then return
 * NULL
 */
struct buffer_head *
__find_get_block(struct block_device *bdev, sector_t block, int size)
	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);

		bh = __find_get_block_slow(bdev, block, size);
EXPORT_SYMBOL(__find_get_block);
/*
 * __getblk will locate (and, if necessary, create) the buffer_head
 * which corresponds to the passed block_device, block and size. The
 * returned buffer has its reference count incremented.
 *
 * __getblk() cannot fail - it just keeps trying.  If you pass it an
 * illegal block number, __getblk() will happily return a buffer_head
 * which represents the non-existent block.  Very weird.
 *
 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
 * attempt is failing.  FIXME, perhaps?
 */
struct buffer_head *
__getblk(struct block_device *bdev, sector_t block, int size)
	struct buffer_head *bh = __find_get_block(bdev, block, size);

		bh = __getblk_slow(bdev, block, size);
EXPORT_SYMBOL(__getblk);
/*
 * __bread() - reads a specified block and returns the bh
 * @block: number of block
 * @size: size (in bytes) to read
 *
 * Reads a specified block, and returns buffer head that contains it.
 * It returns NULL if the block was unreadable.
 */
struct buffer_head *
__bread(struct block_device *bdev, sector_t block, int size)
	struct buffer_head *bh = __getblk(bdev, block, size);

	if (!buffer_uptodate(bh))
		bh = __bread_slow(bh);
EXPORT_SYMBOL(__bread);
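/*
 * Illustrative sketch (not part of this file): the three lookup levels built
 * on the per-cpu LRU above.  __find_get_block() only finds cached buffers,
 * __getblk() also creates them, and __bread() additionally reads them from
 * disk.  "example_lookup_levels" is a made-up name.
 */
static void example_lookup_levels(struct block_device *bdev, sector_t block, int size)
{
	struct buffer_head *cached  = __find_get_block(bdev, block, size);	/* may be NULL */
	struct buffer_head *created = __getblk(bdev, block, size);		/* never NULL */
	struct buffer_head *read    = __bread(bdev, block, size);		/* NULL on I/O error */

	if (cached)
		brelse(cached);
	brelse(created);
	if (read)
		brelse(read);
}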
/*
 * invalidate_bh_lrus() is called rarely - at unmount.  Because it is only for
 * unmount it only needs to ensure that all buffers from the target device are
 * invalidated on return and it doesn't need to worry about new buffers from
 * that device being added - the unmount code has to prevent that.
 */
static void invalidate_bh_lru(void *arg)
	struct bh_lru *b = &get_cpu_var(bh_lrus);

	for (i = 0; i < BH_LRU_SIZE; i++) {
	put_cpu_var(bh_lrus);

static void invalidate_bh_lrus(void)
	on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
void set_bh_page(struct buffer_head *bh,
		struct page *page, unsigned long offset)
	if (offset >= PAGE_SIZE)
	if (PageHighMem(page))
		/*
		 * This catches illegal uses and preserves the offset:
		 */
		bh->b_data = (char *)(0 + offset);
	else
		bh->b_data = page_address(page) + offset;
EXPORT_SYMBOL(set_bh_page);
/*
 * Called when truncating a buffer on a page completely.
 */
static inline void discard_buffer(struct buffer_head * bh)
	clear_buffer_dirty(bh);
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	clear_buffer_delay(bh);
/*
 * try_to_release_page() - release old fs-specific metadata on a page
 *
 * @page: the page which the kernel is trying to free
 * @gfp_mask: memory allocation flags (and I/O mode)
 *
 * The address_space is to try to release any data against the page
 * (presumably at page->private).  If the release was successful, return `1'.
 * Otherwise return zero.
 *
 * The @gfp_mask argument specifies whether I/O may be performed to release
 * this page (__GFP_IO), and whether the call may block (__GFP_WAIT).
 *
 * NOTE: @gfp_mask may go away, and this function may become non-blocking.
 */
int try_to_release_page(struct page *page, int gfp_mask)
	struct address_space * const mapping = page->mapping;

	if (!PageLocked(page))
	if (PageWriteback(page))

	if (mapping && mapping->a_ops->releasepage)
		return mapping->a_ops->releasepage(page, gfp_mask);
	return try_to_free_buffers(page);
/*
 * block_invalidatepage - invalidate part or all of a buffer-backed page
 *
 * @page: the page which is affected
 * @offset: the index of the truncation point
 *
 * block_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * block_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point.  Because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
int block_invalidatepage(struct page *page, unsigned long offset)
	struct buffer_head *head, *bh, *next;
	unsigned int curr_off = 0;

	BUG_ON(!PageLocked(page));
	if (!page_has_buffers(page))

	head = page_buffers(page);
		unsigned int next_off = curr_off + bh->b_size;
		next = bh->b_this_page;

		/*
		 * is this block fully invalidated?
		 */
		if (offset <= curr_off)
		curr_off = next_off;
	} while (bh != head);

	/*
	 * We release buffers only if the entire page is being invalidated.
	 * The get_block cached value has been unconditionally invalidated,
	 * so real IO is not possible anymore.
	 */
	ret = try_to_release_page(page, 0);
EXPORT_SYMBOL(block_invalidatepage);
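/*
 * Illustrative sketch (not part of this file): a filesystem-private
 * invalidatepage hook might do its own cleanup first and then delegate to the
 * generic helper above.  "examplefs_invalidatepage" is a made-up name.
 */
static int examplefs_invalidatepage(struct page *page, unsigned long offset)
{
	/* any filesystem-private bookkeeping would go here */
	return block_invalidatepage(page, offset);
}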
/*
 * We attach and possibly dirty the buffers atomically wrt
 * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
 * is already excluded via the page lock.
 */
void create_empty_buffers(struct page *page,
			unsigned long blocksize, unsigned long b_state)
	struct buffer_head *bh, *head, *tail;

	head = create_buffers(page, blocksize, 1);
		bh->b_state |= b_state;
		bh = bh->b_this_page;
	tail->b_this_page = head;

	spin_lock(&page->mapping->private_lock);
	if (PageUptodate(page) || PageDirty(page)) {
			if (PageDirty(page))
				set_buffer_dirty(bh);
			if (PageUptodate(page))
				set_buffer_uptodate(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	__set_page_buffers(page, head);
	spin_unlock(&page->mapping->private_lock);
EXPORT_SYMBOL(create_empty_buffers);
/*
 * We are taking a block for data and we don't want any output from any
 * buffer-cache aliases starting from return from that function and
 * until the moment when something will explicitly mark the buffer
 * dirty (hopefully that will not happen until we will free that block ;-)
 * We don't even need to mark it not-uptodate - nobody can expect
 * anything from a newly allocated buffer anyway.  We used to use
 * unmap_buffer() for such invalidation, but that was wrong.  We definitely
 * don't want to mark the alias unmapped, for example - it would confuse
 * anyone who might pick it with bread() afterwards...
 *
 * Also..  Note that bforget() doesn't lock the buffer.  So there can
 * be writeout I/O going on against recently-freed buffers.  We don't
 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
 * only if we really need to.  That happens here.
 */
void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
	struct buffer_head *old_bh;

	old_bh = __find_get_block_slow(bdev, block, 0);
#if 0	/* This happens.  Later. */
		if (buffer_dirty(old_bh))
		clear_buffer_dirty(old_bh);
		wait_on_buffer(old_bh);
		clear_buffer_req(old_bh);
EXPORT_SYMBOL(unmap_underlying_metadata);
/*
 * NOTE! All mapped/uptodate combinations are valid:
 *
 *	Mapped	Uptodate	Meaning
 *
 *	No	No		"unknown" - must do get_block()
 *	No	Yes		"hole" - zero-filled
 *	Yes	No		"allocated" - allocated on disk, not read in
 *	Yes	Yes		"valid" - allocated and up-to-date in memory.
 *
 * "Dirty" is valid only with the last case (mapped+uptodate).
 */

/*
 * While block_write_full_page is writing back the dirty buffers under
 * the page lock, whoever dirtied the buffers may decide to clean them
 * again at any time.  We handle that by only looking at the buffer
 * state inside lock_buffer().
 *
 * If block_write_full_page() is called for regular writeback
 * (called_for_sync() is false) then it will redirty a page which has a locked
 * buffer.   This only can happen if someone has written the buffer directly,
 * with submit_bh().  At the address_space level PageWriteback prevents this
 * contention from occurring.
 */
static int __block_write_full_page(struct inode *inode, struct page *page,
			get_block_t *get_block, struct writeback_control *wbc)
	unsigned long block;
	unsigned long last_block;
	struct buffer_head *bh, *head;
	int nr_underway = 0;

	BUG_ON(!PageLocked(page));

	last_block = (inode->i_size - 1) >> inode->i_blkbits;

	if (!page_has_buffers(page)) {
		if (!PageUptodate(page))
		create_empty_buffers(page, 1 << inode->i_blkbits,
					(1 << BH_Dirty)|(1 << BH_Uptodate));

	/*
	 * Be very careful.  We have no exclusion from __set_page_dirty_buffers
	 * here, and the (potentially unmapped) buffers may become dirty at
	 * any time.  If a buffer becomes dirty here after we've inspected it
	 * then we just miss that fact, and the page stays dirty.
	 *
	 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
	 * handle that here by just cleaning them.
	 */

	block = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
	head = page_buffers(page);

	/*
	 * Get all the dirty buffers mapped to disk addresses and
	 * handle any aliases from the underlying blockdev's mapping.
	 */
		if (block > last_block) {
			/*
			 * mapped buffers outside i_size will occur, because
			 * this page can be outside i_size when there is a
			 * truncate in progress.
			 *
			 * if (buffer_mapped(bh))
			 */
			/*
			 * The buffer was zeroed by block_write_full_page()
			 */
			clear_buffer_dirty(bh);
			set_buffer_uptodate(bh);
		} else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
			err = get_block(inode, block, bh, 1);
			if (buffer_new(bh)) {
				/* blockdev mappings never come here */
				clear_buffer_new(bh);
				unmap_underlying_metadata(bh->b_bdev,
		bh = bh->b_this_page;
	} while (bh != head);

		if (buffer_mapped(bh) && buffer_dirty(bh)) {
			if (wbc->sync_mode != WB_SYNC_NONE) {
				if (test_set_buffer_locked(bh)) {
					__set_page_dirty_nobuffers(page);
			if (test_clear_buffer_dirty(bh)) {
				if (!buffer_uptodate(bh))
				mark_buffer_async_write(bh);
	} while ((bh = bh->b_this_page) != head);

	BUG_ON(PageWriteback(page));
	SetPageWriteback(page);		/* Keeps try_to_free_buffers() away */

	/*
	 * The page may come unlocked any time after the *first* submit_bh()
	 * call.  Be careful with its buffers.
	 */
		struct buffer_head *next = bh->b_this_page;
		if (buffer_async_write(bh)) {
			submit_bh(WRITE, bh);
	} while (bh != head);

	if (nr_underway == 0) {
		/*
		 * The page was marked dirty, but the buffers were
		 * clean.  Someone wrote them back by hand with
		 * ll_rw_block/submit_bh.  A rare case.
		 */
			if (!buffer_uptodate(bh)) {
			bh = bh->b_this_page;
		} while (bh != head);
		SetPageUptodate(page);
		end_page_writeback(page);

	/*
	 * ENOSPC, or some other error.  We may already have added some
	 * blocks to the file, so we need to write these out to avoid
	 * exposing stale data.
	 * The page is currently locked and not marked for writeback
	 */
	/* Recovery: lock and submit the mapped buffers */
		if (buffer_mapped(bh) && buffer_dirty(bh)) {
			mark_buffer_async_write(bh);
			/*
			 * The buffer may have been set dirty during
			 * attachment to a dirty page.
			 */
			clear_buffer_dirty(bh);
	} while ((bh = bh->b_this_page) != head);
	BUG_ON(PageWriteback(page));
	SetPageWriteback(page);
		struct buffer_head *next = bh->b_this_page;
		if (buffer_async_write(bh)) {
			clear_buffer_dirty(bh);
			submit_bh(WRITE, bh);
	} while (bh != head);
static int __block_prepare_write(struct inode *inode, struct page *page,
		unsigned from, unsigned to, get_block_t *get_block)
	unsigned block_start, block_end;
	unsigned blocksize, bbits;
	struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;

	BUG_ON(!PageLocked(page));
	BUG_ON(from > PAGE_CACHE_SIZE);
	BUG_ON(to > PAGE_CACHE_SIZE);

	blocksize = 1 << inode->i_blkbits;
	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);
	head = page_buffers(page);

	bbits = inode->i_blkbits;
	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);

	for(bh = head, block_start = 0; bh != head || !block_start;
	    block++, block_start=block_end, bh = bh->b_this_page) {
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (PageUptodate(page)) {
				if (!buffer_uptodate(bh))
					set_buffer_uptodate(bh);
		clear_buffer_new(bh);
		if (!buffer_mapped(bh)) {
			err = get_block(inode, block, bh, 1);
			if (buffer_new(bh)) {
				clear_buffer_new(bh);
				unmap_underlying_metadata(bh->b_bdev,
				if (PageUptodate(page)) {
					if (!buffer_mapped(bh))
					set_buffer_uptodate(bh);
				if (block_end > to || block_start < from) {
					kaddr = kmap_atomic(page, KM_USER0);
					if (block_start < from)
						memset(kaddr+block_start,
							0, from-block_start);
					flush_dcache_page(page);
					kunmap_atomic(kaddr, KM_USER0);
		if (PageUptodate(page)) {
			if (!buffer_uptodate(bh))
				set_buffer_uptodate(bh);
		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
		     (block_start < from || block_end > to)) {
			ll_rw_block(READ, 1, &bh);

	/*
	 * If we issued read requests - let them complete.
	 */
	while(wait_bh > wait) {
		wait_on_buffer(*--wait_bh);
		if (!buffer_uptodate(*wait_bh))

	/*
	 * Zero out any newly allocated blocks to avoid exposing stale
	 * data.  If BH_New is set, we know that the block was newly
	 * allocated in the above loop.
	 */
		block_end = block_start+blocksize;
		if (block_end <= from)
		if (block_start >= to)
		if (buffer_new(bh)) {
			clear_buffer_new(bh);
			if (buffer_uptodate(bh))
			kaddr = kmap_atomic(page, KM_USER0);
			memset(kaddr+block_start, 0, bh->b_size);
			kunmap_atomic(kaddr, KM_USER0);
			set_buffer_uptodate(bh);
			mark_buffer_dirty(bh);
		block_start = block_end;
		bh = bh->b_this_page;
	} while (bh != head);
static int __block_commit_write(struct inode *inode, struct page *page,
		unsigned from, unsigned to)
	unsigned block_start, block_end;
	struct buffer_head *bh, *head;

	blocksize = 1 << inode->i_blkbits;

	for(bh = head = page_buffers(page), block_start = 0;
	    bh != head || !block_start;
	    block_start=block_end, bh = bh->b_this_page) {
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (!buffer_uptodate(bh))
			set_buffer_uptodate(bh);
			mark_buffer_dirty(bh);

	/*
	 * If this is a partial write which happened to make all buffers
	 * uptodate then we can optimize away a bogus readpage() for
	 * the next read(). Here we 'discover' whether the page went
	 * uptodate as a result of this (potentially partial) write.
	 */
		SetPageUptodate(page);
/*
 * Generic "read page" function for block devices that have the normal
 * get_block functionality. This is most of the block device filesystems.
 * Reads the page asynchronously --- the unlock_buffer() and
 * set/clear_buffer_uptodate() functions propagate buffer state into the
 * page struct once IO has completed.
 */
int block_read_full_page(struct page *page, get_block_t *get_block)
	struct inode *inode = page->mapping->host;
	sector_t iblock, lblock;
	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
	unsigned int blocksize;
	int fully_mapped = 1;

	if (!PageLocked(page))
	if (PageUptodate(page))
	blocksize = 1 << inode->i_blkbits;
	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);
	head = page_buffers(page);

	iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
	lblock = (inode->i_size+blocksize-1) >> inode->i_blkbits;

		if (buffer_uptodate(bh))

		if (!buffer_mapped(bh)) {
			if (iblock < lblock) {
				if (get_block(inode, iblock, bh, 0))
			if (!buffer_mapped(bh)) {
				void *kaddr = kmap_atomic(page, KM_USER0);
				memset(kaddr + i * blocksize, 0, blocksize);
				flush_dcache_page(page);
				kunmap_atomic(kaddr, KM_USER0);
				set_buffer_uptodate(bh);
			/*
			 * get_block() might have updated the buffer
			 */
			if (buffer_uptodate(bh))
	} while (i++, iblock++, (bh = bh->b_this_page) != head);

		SetPageMappedToDisk(page);

		/*
		 * All buffers are uptodate - we can set the page uptodate
		 * as well. But not if get_block() returned an error.
		 */
		if (!PageError(page))
			SetPageUptodate(page);

	/* Stage two: lock the buffers */
	for (i = 0; i < nr; i++) {
		mark_buffer_async_read(bh);

	/*
	 * Stage 3: start the IO.  Check for uptodateness
	 * inside the buffer lock in case another process reading
	 * the underlying blockdev brought it uptodate (the sct fix).
	 */
	for (i = 0; i < nr; i++) {
		if (buffer_uptodate(bh))
			end_buffer_async_read(bh, 1);
			submit_bh(READ, bh);
/* utility function for filesystems that need to do work on expanding
 * truncates.  Uses prepare/commit_write to allow the filesystem to
 * deal with the hole.
 */
int generic_cont_expand(struct inode *inode, loff_t size)
	struct address_space *mapping = inode->i_mapping;
	unsigned long index, offset, limit;

	limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
	if (limit != RLIM_INFINITY && size > (loff_t)limit) {
		send_sig(SIGXFSZ, current, 0);
	if (size > inode->i_sb->s_maxbytes)

	offset = (size & (PAGE_CACHE_SIZE-1));	/* Within page */

	/* ugh.  in prepare/commit_write, if from==to==start of block, we
	** skip the prepare.  make sure we never send an offset for the start
	** of a block.
	*/
	if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {

	index = size >> PAGE_CACHE_SHIFT;

	page = grab_cache_page(mapping, index);
	err = mapping->a_ops->prepare_write(NULL, page, offset, offset);
	err = mapping->a_ops->commit_write(NULL, page, offset, offset);
	page_cache_release(page);
/*
 * For moronic filesystems that do not allow holes in file.
 * We may have to extend the file.
 */
int cont_prepare_write(struct page *page, unsigned offset,
		unsigned to, get_block_t *get_block, loff_t *bytes)
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;
	struct page *new_page;
	unsigned long pgpos;
	unsigned blocksize = 1 << inode->i_blkbits;

	while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) {
		new_page = grab_cache_page(mapping, pgpos);
		/* we might sleep */
		if (*bytes>>PAGE_CACHE_SHIFT != pgpos) {
			unlock_page(new_page);
			page_cache_release(new_page);
		zerofrom = *bytes & ~PAGE_CACHE_MASK;
		if (zerofrom & (blocksize-1)) {
			*bytes |= (blocksize-1);
		status = __block_prepare_write(inode, new_page, zerofrom,
						PAGE_CACHE_SIZE, get_block);
		kaddr = kmap_atomic(new_page, KM_USER0);
		memset(kaddr+zerofrom, 0, PAGE_CACHE_SIZE-zerofrom);
		flush_dcache_page(new_page);
		kunmap_atomic(kaddr, KM_USER0);
		__block_commit_write(inode, new_page,
				zerofrom, PAGE_CACHE_SIZE);
		unlock_page(new_page);
		page_cache_release(new_page);

	if (page->index < pgpos) {
		/* completely inside the area */

	/* page covers the boundary, find the boundary offset */
	zerofrom = *bytes & ~PAGE_CACHE_MASK;

	/* if we will expand the thing last block will be filled */
	if (to > zerofrom && (zerofrom & (blocksize-1))) {
		*bytes |= (blocksize-1);

	/* starting below the boundary? Nothing to zero out */
	if (offset <= zerofrom)

	status = __block_prepare_write(inode, page, zerofrom, to, get_block);
	if (zerofrom < offset) {
		kaddr = kmap_atomic(page, KM_USER0);
		memset(kaddr+zerofrom, 0, offset-zerofrom);
		flush_dcache_page(page);
		kunmap_atomic(kaddr, KM_USER0);
		__block_commit_write(inode, page, zerofrom, offset);

	ClearPageUptodate(page);

	ClearPageUptodate(new_page);
	unlock_page(new_page);
	page_cache_release(new_page);
int block_prepare_write(struct page *page, unsigned from, unsigned to,
			get_block_t *get_block)
	struct inode *inode = page->mapping->host;
	int err = __block_prepare_write(inode, page, from, to, get_block);
		ClearPageUptodate(page);

int block_commit_write(struct page *page, unsigned from, unsigned to)
	struct inode *inode = page->mapping->host;
	__block_commit_write(inode,page,from,to);

int generic_commit_write(struct file *file, struct page *page,
		unsigned from, unsigned to)
	struct inode *inode = page->mapping->host;
	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
	__block_commit_write(inode,page,from,to);
	if (pos > inode->i_size) {
		inode->i_size = pos;
		mark_inode_dirty(inode);
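/*
 * Illustrative sketch (not part of this file): a filesystem with an ordinary
 * get_block routine can build most of its address_space_operations from the
 * helpers above.  "examplefs_get_block" and the other examplefs names are
 * made up for demonstration only.
 */
static int examplefs_get_block(struct inode *inode, sector_t iblock,
		struct buffer_head *bh_result, int create);

static int examplefs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, examplefs_get_block);
}

static int examplefs_prepare_write(struct file *file, struct page *page,
		unsigned from, unsigned to)
{
	return block_prepare_write(page, from, to, examplefs_get_block);
}

static struct address_space_operations examplefs_aops = {
	.readpage	= examplefs_readpage,
	.prepare_write	= examplefs_prepare_write,
	.commit_write	= generic_commit_write,	/* updates i_size after __block_commit_write */
};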
/*
 * On entry, the page is fully not uptodate.
 * On exit the page is fully uptodate in the areas outside (from,to)
 */
int nobh_prepare_write(struct page *page, unsigned from, unsigned to,
			get_block_t *get_block)
{
	struct inode *inode = page->mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocksize = 1 << blkbits;
	struct buffer_head map_bh;
	struct buffer_head *read_bh[MAX_BUF_PER_PAGE];
	unsigned block_in_page;
	unsigned block_start;
	sector_t block_in_file;
	int is_mapped_to_disk = 1;

	if (PageMappedToDisk(page))
		return 0;

	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
	map_bh.b_page = page;

	/*
	 * We loop across all blocks in the page, whether or not they are
	 * part of the affected region.  This is so we can discover if the
	 * page is fully mapped-to-disk.
	 */
	for (block_start = 0, block_in_page = 0;
		  block_start < PAGE_CACHE_SIZE;
		  block_in_page++, block_start += blocksize) {
		unsigned block_end = block_start + blocksize;

		create = 1;
		if (block_start >= to)
			create = 0;
		ret = get_block(inode, block_in_file + block_in_page,
					&map_bh, create);
		if (ret)
			goto failed;
		if (!buffer_mapped(&map_bh))
			is_mapped_to_disk = 0;
		if (buffer_new(&map_bh))
			unmap_underlying_metadata(map_bh.b_bdev,
						map_bh.b_blocknr);
		if (PageUptodate(page))
			continue;
		if (buffer_new(&map_bh) || !buffer_mapped(&map_bh)) {
			kaddr = kmap_atomic(page, KM_USER0);
			if (block_start < from) {
				memset(kaddr+block_start, 0, from-block_start);
				dirtied_it = 1;
			}
			if (block_end > to) {
				memset(kaddr + to, 0, block_end - to);
				dirtied_it = 1;
			}
			flush_dcache_page(page);
			kunmap_atomic(kaddr, KM_USER0);
			continue;
		}
		if (buffer_uptodate(&map_bh))
			continue;	/* reiserfs does this */
		if (block_start < from || block_end > to) {
			struct buffer_head *bh = alloc_buffer_head(GFP_NOFS);

			bh->b_state = map_bh.b_state;
			atomic_set(&bh->b_count, 0);
			bh->b_this_page = 0;
			bh->b_blocknr = map_bh.b_blocknr;
			bh->b_size = blocksize;
			bh->b_data = (char *)(long)block_start;
			bh->b_bdev = map_bh.b_bdev;
			bh->b_private = NULL;
			read_bh[nr_reads++] = bh;
		}
	}

	if (nr_reads) {
		ll_rw_block(READ, nr_reads, read_bh);
		for (i = 0; i < nr_reads; i++) {
			wait_on_buffer(read_bh[i]);
			if (!buffer_uptodate(read_bh[i]))
				ret = -EIO;
			free_buffer_head(read_bh[i]);
			read_bh[i] = NULL;
		}
		if (ret)
			goto failed;
	}

	if (is_mapped_to_disk)
		SetPageMappedToDisk(page);
	SetPageUptodate(page);

	/*
	 * Setting the page dirty here isn't necessary for the prepare_write
	 * function - commit_write will do that.  But if/when this function is
	 * used within the pagefault handler to ensure that all mmapped pages
	 * have backing space in the filesystem, we will need to dirty the page
	 * if its contents were altered.
	 */
	if (dirtied_it)
		set_page_dirty(page);
	return 0;

failed:
	for (i = 0; i < nr_reads; i++) {
		if (read_bh[i])
			free_buffer_head(read_bh[i]);
	}

	/*
	 * Error recovery is pretty slack.  Clear the page and mark it dirty
	 * so we'll later zero out any blocks which _were_ allocated.
	 */
	kaddr = kmap_atomic(page, KM_USER0);
	memset(kaddr, 0, PAGE_CACHE_SIZE);
	kunmap_atomic(kaddr, KM_USER0);
	SetPageUptodate(page);
	set_page_dirty(page);
	return ret;
}
EXPORT_SYMBOL(nobh_prepare_write);
int nobh_commit_write(struct file *file, struct page *page,
			unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;
	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;

	set_page_dirty(page);
	if (pos > inode->i_size) {
		inode->i_size = pos;
		mark_inode_dirty(inode);
	}
	return 0;
}
EXPORT_SYMBOL(nobh_commit_write);
/*
 * This function assumes that ->prepare_write() uses nobh_prepare_write().
 */
int nobh_truncate_page(struct address_space *mapping, loff_t from)
{
	struct inode *inode = mapping->host;
	unsigned blocksize = 1 << inode->i_blkbits;
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	struct address_space_operations *a_ops = mapping->a_ops;

	if ((offset & (blocksize - 1)) == 0)
		goto out;

	page = grab_cache_page(mapping, index);
	if (!page)
		goto out;

	to = (offset + blocksize) & ~(blocksize - 1);
	ret = a_ops->prepare_write(NULL, page, offset, to);
	if (ret == 0) {
		kaddr = kmap_atomic(page, KM_USER0);
		memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
		flush_dcache_page(page);
		kunmap_atomic(kaddr, KM_USER0);
		set_page_dirty(page);
	}
	unlock_page(page);
	page_cache_release(page);
out:
	return ret;
}
EXPORT_SYMBOL(nobh_truncate_page);
int block_truncate_page(struct address_space *mapping,
			loff_t from, get_block_t *get_block)
{
	unsigned long index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	unsigned blocksize, iblock, length, pos;
	struct inode *inode = mapping->host;
	struct buffer_head *bh;

	blocksize = 1 << inode->i_blkbits;
	length = offset & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;

	length = blocksize - length;
	iblock = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);

	page = grab_cache_page(mapping, index);
	if (!page)
		goto out;

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	/* Find the buffer that contains "offset" */
	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	if (!buffer_mapped(bh)) {
		err = get_block(inode, iblock, bh, 0);
		if (err)
			goto unlock;
		/* unmapped? It's a hole - nothing to do */
		if (!buffer_mapped(bh))
			goto unlock;
	}

	/* Ok, it's mapped. Make sure it's up-to-date */
	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh) && !buffer_delay(bh)) {
		ll_rw_block(READ, 1, &bh);
		wait_on_buffer(bh);
		/* Uhhuh. Read error. Complain and punt. */
		if (!buffer_uptodate(bh))
			goto unlock;
	}

	kaddr = kmap_atomic(page, KM_USER0);
	memset(kaddr + offset, 0, length);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);

	mark_buffer_dirty(bh);
unlock:
	unlock_page(page);
	page_cache_release(page);
out:
	return err;
}
/*
 * The generic ->writepage function for buffer-backed address_spaces
 */
int block_write_full_page(struct page *page, get_block_t *get_block,
			struct writeback_control *wbc)
{
	struct inode * const inode = page->mapping->host;
	const unsigned long end_index = inode->i_size >> PAGE_CACHE_SHIFT;

	/* Is the page fully inside i_size? */
	if (page->index < end_index)
		return __block_write_full_page(inode, page, get_block, wbc);

	/* Is the page fully outside i_size? (truncate in progress) */
	offset = inode->i_size & (PAGE_CACHE_SIZE-1);
	if (page->index >= end_index+1 || !offset) {
		/*
		 * The page may have dirty, unmapped buffers.  For example,
		 * they may have been added in ext3_writepage().  Make them
		 * freeable here, so the page does not leak.
		 */
		block_invalidatepage(page, 0);
		unlock_page(page);
		return 0;
	}

	/*
	 * The page straddles i_size.  It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
	 * in multiples of the page size.  For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	kaddr = kmap_atomic(page, KM_USER0);
	memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);
	return __block_write_full_page(inode, page, get_block, wbc);
}
sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
			    get_block_t *get_block)
{
	struct buffer_head tmp;
	struct inode *inode = mapping->host;

	get_block(inode, block, &tmp, 0);
	return tmp.b_blocknr;
}
static int end_bio_bh_io_sync(struct bio *bio, unsigned int bytes_done, int err)
{
	struct buffer_head *bh = bio->bi_private;

	if (bio->bi_size)
		return 1;

	bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
	bio_put(bio);
	return 0;
}

int submit_bh(int rw, struct buffer_head * bh)
{
	BUG_ON(!buffer_locked(bh));
	BUG_ON(!buffer_mapped(bh));
	BUG_ON(!bh->b_end_io);

	if ((rw == READ || rw == READA) && buffer_uptodate(bh))
		buffer_error();
	if (rw == WRITE && !buffer_uptodate(bh))
		buffer_error();
	if (rw == READ && buffer_dirty(bh))
		buffer_error();

	/*
	 * from here on down, it's all bio -- do the initial mapping,
	 * submit_bio -> generic_make_request may further map this bio around
	 */
	bio = bio_alloc(GFP_NOIO, 1);

	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	bio->bi_io_vec[0].bv_page = bh->b_page;
	bio->bi_io_vec[0].bv_len = bh->b_size;
	bio->bi_io_vec[0].bv_offset = bh_offset(bh);

	bio->bi_vcnt = 1;
	bio->bi_idx = 0;
	bio->bi_size = bh->b_size;

	bio->bi_end_io = end_bio_bh_io_sync;
	bio->bi_private = bh;

	return submit_bio(rw, bio);
}
/**
 * ll_rw_block: low-level access to block devices (DEPRECATED)
 * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
 * @nr: number of &struct buffer_heads in the array
 * @bhs: array of pointers to &struct buffer_head
 *
 * ll_rw_block() takes an array of pointers to &struct buffer_heads,
 * and requests an I/O operation on them, either a %READ or a %WRITE.
 * The third %READA option is described in the documentation for
 * generic_make_request() which ll_rw_block() calls.
 *
 * This function drops any buffer that it cannot get a lock on (with the
 * BH_Lock state bit), any buffer that appears to be clean when doing a
 * write request, and any buffer that appears to be up-to-date when doing
 * a read request.  Further it marks as clean buffers that are processed
 * for writing (the buffer cache won't assume that they are actually clean
 * until the buffer gets unlocked).
 *
 * ll_rw_block sets b_end_io to a simple completion handler that marks
 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
 * up any waiters.
 *
 * All of the buffers must be for the same device, and must also be a
 * multiple of the current approved size for the device.
 */
void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
{
	int i;

	for (i = 0; i < nr; i++) {
		struct buffer_head *bh = bhs[i];

		if (test_set_buffer_locked(bh))
			continue;

		get_bh(bh);
		bh->b_end_io = end_buffer_io_sync;
		if (rw == WRITE) {
			if (test_clear_buffer_dirty(bh)) {
				submit_bh(WRITE, bh);
				continue;
			}
		} else {
			if (!buffer_uptodate(bh)) {
				submit_bh(rw, bh);
				continue;
			}
		}
		unlock_buffer(bh);
		put_bh(bh);
	}
}
/*
 * For a data-integrity writeout, we need to wait upon any in-progress I/O
 * and then start new I/O and then wait upon it.
 */
void sync_dirty_buffer(struct buffer_head *bh)
{
	WARN_ON(atomic_read(&bh->b_count) < 1);
	lock_buffer(bh);
	if (test_clear_buffer_dirty(bh)) {
		get_bh(bh);
		bh->b_end_io = end_buffer_io_sync;
		submit_bh(WRITE, bh);
		wait_on_buffer(bh);
	} else {
		unlock_buffer(bh);
	}
}
/*
 * Sanity checks for try_to_free_buffers.
 */
static void check_ttfb_buffer(struct page *page, struct buffer_head *bh)
{
	if (!buffer_uptodate(bh) && !buffer_req(bh)) {
		if (PageUptodate(page) && page->mapping
			&& buffer_mapped(bh)	/* discard_buffer */
			&& S_ISBLK(page->mapping->host->i_mode)) {
			buffer_error();
		}
	}
}

/*
 * try_to_free_buffers() checks if all the buffers on this particular page
 * are unused, and releases them if so.
 *
 * Exclusion against try_to_free_buffers may be obtained by either
 * locking the page or by holding its mapping's private_lock.
 *
 * If the page is dirty but all the buffers are clean then we need to
 * be sure to mark the page clean as well.  This is because the page
 * may be against a block device, and a later reattachment of buffers
 * to a dirty page will set *all* buffers dirty.  Which would corrupt
 * filesystem data on the same device.
 *
 * The same applies to regular filesystem pages: if all the buffers are
 * clean then we set the page clean and proceed.  To do that, we require
 * total exclusion from __set_page_dirty_buffers().  That is obtained with
 * private_lock.
 *
 * try_to_free_buffers() is non-blocking.
 */
static inline int buffer_busy(struct buffer_head *bh)
{
	return atomic_read(&bh->b_count) |
		(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
}

static int
drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
{
	struct buffer_head *head = page_buffers(page);
	struct buffer_head *bh;
	int was_uptodate = 1;

	bh = head;
	do {
		check_ttfb_buffer(page, bh);
		if (buffer_busy(bh))
			goto failed;
		if (!buffer_uptodate(bh) && !buffer_req(bh))
			was_uptodate = 0;
		bh = bh->b_this_page;
	} while (bh != head);

	if (!was_uptodate && PageUptodate(page))
		buffer_error();

	do {
		struct buffer_head *next = bh->b_this_page;

		if (!list_empty(&bh->b_assoc_buffers))
			__remove_assoc_queue(bh);
		bh = next;
	} while (bh != head);
	*buffers_to_free = head;
	__clear_page_buffers(page);
	return 1;
failed:
	return 0;
}
int try_to_free_buffers(struct page *page)
{
	struct address_space * const mapping = page->mapping;
	struct buffer_head *buffers_to_free = NULL;
	int ret = 0;

	BUG_ON(!PageLocked(page));
	if (PageWriteback(page))
		return 0;

	if (mapping == NULL) {		/* swapped-in anon page */
		ret = drop_buffers(page, &buffers_to_free);
		goto out;
	}

	spin_lock(&mapping->private_lock);
	ret = drop_buffers(page, &buffers_to_free);
	if (ret && !PageSwapCache(page)) {
		/*
		 * If the filesystem writes its buffers by hand (eg ext3)
		 * then we can have clean buffers against a dirty page.  We
		 * clean the page here; otherwise later reattachment of buffers
		 * could encounter a non-uptodate page, which is unresolvable.
		 * This only applies in the rare case where try_to_free_buffers
		 * succeeds but the page is not freed.
		 */
		clear_page_dirty(page);
	}
	spin_unlock(&mapping->private_lock);
out:
	if (buffers_to_free) {
		struct buffer_head *bh = buffers_to_free;

		do {
			struct buffer_head *next = bh->b_this_page;
			free_buffer_head(bh);
			bh = next;
		} while (bh != buffers_to_free);
	}
	return ret;
}
EXPORT_SYMBOL(try_to_free_buffers);
int block_sync_page(struct page *page)
{
	blk_run_queues();
	return 0;
}
/*
 * There are no bdflush tunables left.  But distributions are
 * still running obsolete flush daemons, so we terminate them here.
 *
 * Use of bdflush() is deprecated and will be removed in a future kernel.
 * The `pdflush' kernel threads fully replace bdflush daemons and this call.
 */
asmlinkage long sys_bdflush(int func, long data)
{
	static int msg_count;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (msg_count < 5) {
		msg_count++;
		printk(KERN_INFO
			"warning: process `%s' used the obsolete bdflush"
			" system call\n", current->comm);
		printk(KERN_INFO "Fix your initscripts?\n");
	}

	if (func == 1)
		do_exit(0);
	return 0;
}
/*
 * Buffer-head allocation
 */
static kmem_cache_t *bh_cachep;

/*
 * Once the number of bh's in the machine exceeds this level, we start
 * stripping them in writeback.
 */
static int max_buffer_heads;

int buffer_heads_over_limit;

struct bh_accounting {
	int nr;			/* Number of live bh's */
	int ratelimit;		/* Limit cacheline bouncing */
};

static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};

static void recalc_bh_state(void)
{
	int i;
	int tot = 0;

	if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
		return;
	__get_cpu_var(bh_accounting).ratelimit = 0;
	for (i = 0; i < NR_CPUS; i++) {
		tot += per_cpu(bh_accounting, i).nr;
	}
	buffer_heads_over_limit = (tot > max_buffer_heads);
}

struct buffer_head *alloc_buffer_head(int gfp_flags)
{
	struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
	if (ret) {
		preempt_disable();
		__get_cpu_var(bh_accounting).nr++;
		recalc_bh_state();
		preempt_enable();
	}
	return ret;
}
EXPORT_SYMBOL(alloc_buffer_head);

void free_buffer_head(struct buffer_head *bh)
{
	BUG_ON(!list_empty(&bh->b_assoc_buffers));
	kmem_cache_free(bh_cachep, bh);
	preempt_disable();
	__get_cpu_var(bh_accounting).nr--;
	recalc_bh_state();
	preempt_enable();
}
EXPORT_SYMBOL(free_buffer_head);
static void
init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long flags)
{
	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
			SLAB_CTOR_CONSTRUCTOR) {
		struct buffer_head * bh = (struct buffer_head *)data;

		memset(bh, 0, sizeof(*bh));
		INIT_LIST_HEAD(&bh->b_assoc_buffers);
	}
}

static void buffer_init_cpu(int cpu)
{
	struct bh_accounting *bha = &per_cpu(bh_accounting, cpu);
	struct bh_lru *bhl = &per_cpu(bh_lrus, cpu);

	bha->nr = 0;
	bha->ratelimit = 0;
	memset(bhl, 0, sizeof(*bhl));
}

static int __devinit buffer_cpu_notify(struct notifier_block *self,
				unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
		buffer_init_cpu(cpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __devinitdata buffer_nb = {
	.notifier_call	= buffer_cpu_notify,
};

void __init buffer_init(void)
{
	int i;
	int nrpages;

	bh_cachep = kmem_cache_create("buffer_head",
			sizeof(struct buffer_head), 0,
			0, init_buffer_head, NULL);
	for (i = 0; i < ARRAY_SIZE(bh_wait_queue_heads); i++)
		init_waitqueue_head(&bh_wait_queue_heads[i].wqh);

	/*
	 * Limit the bh occupancy to 10% of ZONE_NORMAL
	 */
	nrpages = (nr_free_buffer_pages() * 10) / 100;
	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
	buffer_cpu_notify(&buffer_nb, (unsigned long)CPU_UP_PREPARE,
				(void *)(long)smp_processor_id());
	register_cpu_notifier(&buffer_nb);
}