/*
 *  linux/fs/buffer.c
 *
 *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
 */

/*
 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 *
 * Removed a lot of unnecessary code and simplified things now that
 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 *
 * Speed up hash, lru, and free list operations.  Use gfp() for allocating
 * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
 *
 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
 *
 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
 */
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
inline void
init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
{
	bh->b_end_io = handler;
	bh->b_private = private;
}
static int sync_buffer(void *word)
{
	struct block_device *bd;
	struct buffer_head *bh
		= container_of(word, struct buffer_head, b_state);

	smp_mb();
	bd = bh->b_bdev;
	if (bd)
		blk_run_address_space(bd->bd_inode->i_mapping);
	io_schedule();
	return 0;
}
void __lock_buffer(struct buffer_head *bh)
{
	wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
							TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);
void unlock_buffer(struct buffer_head *bh)
{
	clear_bit_unlock(BH_Lock, &bh->b_state);
	smp_mb__after_clear_bit();
	wake_up_bit(&bh->b_state, BH_Lock);
}
/*
 * Block until a buffer comes unlocked.  This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
 */
void __wait_on_buffer(struct buffer_head * bh)
{
	wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
}
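/*
 * Usage sketch (illustrative, an assumption about typical callers rather
 * than part of this file): these helpers are normally reached through the
 * lock_buffer()/wait_on_buffer() wrappers in <linux/buffer_head.h>:
 *
 *	lock_buffer(bh);
 *	... examine or modify bh->b_data ...
 *	unlock_buffer(bh);
 */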
static void
__clear_page_buffers(struct page *page)
{
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page_cache_release(page);
}
static int quiet_error(struct buffer_head *bh)
{
	if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit())
		return 0;
	return 1;
}
static void buffer_io_error(struct buffer_head *bh)
{
	char b[BDEVNAME_SIZE];
	printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
			bdevname(bh->b_bdev, b),
			(unsigned long long)bh->b_blocknr);
}
/*
 * End-of-IO handler helper function which does not touch the bh after
 * unlocking it.
 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
 * a race there is benign: unlock_buffer() only uses the bh's address for
 * hashing after unlocking the buffer, so it doesn't actually touch the bh
 * itself.
 */
static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		/* This happens, due to failed READA attempts. */
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
}
/*
 * Default synchronous end-of-IO handler..  Just mark it up-to-date and
 * unlock the buffer. This is what ll_rw_block uses too.
 */
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
	__end_buffer_read_notouch(bh, uptodate);
	put_bh(bh);
}
void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
	char b[BDEVNAME_SIZE];

	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		if (!buffer_eopnotsupp(bh) && !quiet_error(bh)) {
			buffer_io_error(bh);
			printk(KERN_WARNING "lost page write due to "
					"I/O error on %s\n",
				       bdevname(bh->b_bdev, b));
		}
		set_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}
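/*
 * Usage sketch (illustrative assumption, mirroring __bread_slow() later in
 * this file): a caller doing its own synchronous read with the completion
 * handlers above looks like
 *
 *	get_bh(bh);
 *	lock_buffer(bh);
 *	bh->b_end_io = end_buffer_read_sync;
 *	submit_bh(READ, bh);
 *	wait_on_buffer(bh);
 *	if (!buffer_uptodate(bh))
 *		... the read failed ...
 */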
/*
 * Write out and wait upon all the dirty data associated with a block
 * device via its mapping.  Does not take the superblock lock.
 */
int sync_blockdev(struct block_device *bdev)
{
	int ret = 0;

	if (bdev)
		ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
	return ret;
}
EXPORT_SYMBOL(sync_blockdev);
/*
 * Write out and wait upon all dirty data associated with this
 * device.   Filesystem data as well as the underlying block
 * device.  Takes the superblock lock.
 */
int fsync_bdev(struct block_device *bdev)
{
	struct super_block *sb = get_super(bdev);
	if (sb) {
		int res = fsync_super(sb);
		drop_super(sb);
		return res;
	}
	return sync_blockdev(bdev);
}
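/*
 * Usage sketch (illustrative assumption): "flush and forget everything on
 * this device" combines this helper with invalidate_bdev() below, which is
 * roughly what the BLKFLSBUF ioctl does:
 *
 *	fsync_bdev(bdev);
 *	invalidate_bdev(bdev);
 */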
/**
 * freeze_bdev  --  lock a filesystem and force it into a consistent state
 * @bdev:	blockdevice to lock
 *
 * This takes the block device bd_mount_sem to make sure no new mounts
 * happen on bdev until thaw_bdev() is called.
 * If a superblock is found on this device, we take the s_umount semaphore
 * on it to make sure nobody unmounts until the snapshot creation is done.
 * The reference counter (bd_fsfreeze_count) guarantees that only the last
 * unfreeze process can actually unfreeze the frozen filesystem when multiple
 * freeze requests arrive simultaneously. It counts up in freeze_bdev() and
 * counts down in thaw_bdev(). When it becomes 0, thaw_bdev() will actually
 * unfreeze.
 */
struct super_block *freeze_bdev(struct block_device *bdev)
{
	struct super_block *sb;
	int error = 0;

	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (bdev->bd_fsfreeze_count > 0) {
		bdev->bd_fsfreeze_count++;
		sb = get_super(bdev);
		mutex_unlock(&bdev->bd_fsfreeze_mutex);
		return sb;
	}
	bdev->bd_fsfreeze_count++;

	down(&bdev->bd_mount_sem);
	sb = get_super(bdev);
	if (sb && !(sb->s_flags & MS_RDONLY)) {
		sb->s_frozen = SB_FREEZE_WRITE;
		smp_wmb();

		__fsync_super(sb);

		sb->s_frozen = SB_FREEZE_TRANS;
		smp_wmb();

		sync_blockdev(sb->s_bdev);

		if (sb->s_op->freeze_fs) {
			error = sb->s_op->freeze_fs(sb);
			if (error) {
				printk(KERN_ERR
					"VFS:Filesystem freeze failed\n");
				sb->s_frozen = SB_UNFROZEN;
				drop_super(sb);
				up(&bdev->bd_mount_sem);
				bdev->bd_fsfreeze_count--;
				mutex_unlock(&bdev->bd_fsfreeze_mutex);
				return ERR_PTR(error);
			}
		}
	}

	sync_blockdev(bdev);
	mutex_unlock(&bdev->bd_fsfreeze_mutex);

	return sb;	/* thaw_bdev releases s->s_umount and bd_mount_sem */
}
EXPORT_SYMBOL(freeze_bdev);
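/*
 * Usage sketch (illustrative assumption): a snapshot-style caller pairs
 * freeze_bdev() with thaw_bdev(), minding the ERR_PTR() convention above:
 *
 *	struct super_block *sb = freeze_bdev(bdev);
 *	if (IS_ERR(sb))
 *		return PTR_ERR(sb);
 *	... take the snapshot ...
 *	thaw_bdev(bdev, sb);
 */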
/**
 * thaw_bdev  -- unlock filesystem
 * @bdev:	blockdevice to unlock
 * @sb:	associated superblock
 *
 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
 */
int thaw_bdev(struct block_device *bdev, struct super_block *sb)
{
	int error = 0;

	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (!bdev->bd_fsfreeze_count) {
		mutex_unlock(&bdev->bd_fsfreeze_mutex);
		return -EINVAL;
	}

	bdev->bd_fsfreeze_count--;
	if (bdev->bd_fsfreeze_count > 0) {
		if (sb)
			drop_super(sb);
		mutex_unlock(&bdev->bd_fsfreeze_mutex);
		return 0;
	}

	if (sb) {
		BUG_ON(sb->s_bdev != bdev);
		if (!(sb->s_flags & MS_RDONLY)) {
			if (sb->s_op->unfreeze_fs) {
				error = sb->s_op->unfreeze_fs(sb);
				if (error) {
					printk(KERN_ERR
						"VFS:Filesystem thaw failed\n");
					sb->s_frozen = SB_FREEZE_TRANS;
					bdev->bd_fsfreeze_count++;
					mutex_unlock(&bdev->bd_fsfreeze_mutex);
					return error;
				}
			}
			sb->s_frozen = SB_UNFROZEN;
			smp_wmb();
			wake_up(&sb->s_wait_unfrozen);
		}
		drop_super(sb);
	}

	up(&bdev->bd_mount_sem);
	mutex_unlock(&bdev->bd_fsfreeze_mutex);
	return 0;
}
EXPORT_SYMBOL(thaw_bdev);
/*
 * Various filesystems appear to want __find_get_block to be non-blocking.
 * But it's the page lock which protects the buffers.  To get around this,
 * we get exclusion from try_to_free_buffers with the blockdev mapping's
 * private_lock.
 *
 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
 * may be quite high.  This code could TryLock the page, and if that
 * succeeds, there is no need to take private_lock. (But if
 * private_lock is contended then so is mapping->tree_lock).
 */
static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block)
{
	struct inode *bd_inode = bdev->bd_inode;
	struct address_space *bd_mapping = bd_inode->i_mapping;
	struct buffer_head *ret = NULL;
	pgoff_t index;
	struct buffer_head *bh;
	struct buffer_head *head;
	struct page *page;
	int all_mapped = 1;

	index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
	page = find_get_page(bd_mapping, index);
	if (!page)
		goto out;

	spin_lock(&bd_mapping->private_lock);
	if (!page_has_buffers(page))
		goto out_unlock;
	head = page_buffers(page);
	bh = head;
	do {
		if (bh->b_blocknr == block) {
			ret = bh;
			get_bh(bh);
			goto out_unlock;
		}
		if (!buffer_mapped(bh))
			all_mapped = 0;
		bh = bh->b_this_page;
	} while (bh != head);

	/* we might be here because some of the buffers on this page are
	 * not mapped.  This is due to various races between
	 * file io on the block device and getblk.  It gets dealt with
	 * elsewhere, don't buffer_error if we had some unmapped buffers
	 */
	if (all_mapped) {
		printk("__find_get_block_slow() failed. "
			"block=%llu, b_blocknr=%llu\n",
			(unsigned long long)block,
			(unsigned long long)bh->b_blocknr);
		printk("b_state=0x%08lx, b_size=%zu\n",
			bh->b_state, bh->b_size);
		printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
	}
out_unlock:
	spin_unlock(&bd_mapping->private_lock);
	page_cache_release(page);
out:
	return ret;
}
/* If invalidate_buffers() will trash dirty buffers, it means some kind
   of fs corruption is going on. Trashing dirty data always implies losing
   information that was supposed to be just stored on the physical layer
   by the user.

   Thus invalidate_buffers in general usage is not allowed to trash
   dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
   be preserved.  These buffers are simply skipped.

   We also skip buffers which are still in use.  For example this can
   happen if a userspace program is reading the block device.

   NOTE: In the case where the user removed a removable-media-disk even if
   there's still dirty data not synced on disk (due to a bug in the device
   driver or due to an error of the user), by not destroying the dirty buffers
   we could generate corruption also on the next media inserted, thus a
   parameter is necessary to handle this case in the most safe way possible
   (trying to not corrupt also the new disk inserted with the data belonging
   to the old now corrupted disk). Also for the ramdisk the natural thing
   to do in order to release the ramdisk memory is to destroy dirty buffers.

   These are two special cases. Normal usage implies the device driver
   to issue a sync on the device (without waiting I/O completion) and
   then an invalidate_buffers call that doesn't trash dirty buffers.

   For handling cache coherency with the blkdev pagecache the 'update' case
   has been introduced. It is needed to re-read from disk any pinned
   buffer. NOTE: re-reading from disk is destructive so we can do it only
   when we assume nobody is changing the buffercache under our I/O and when
   we think the disk contains more recent information than the buffercache.
   The update == 1 pass marks the buffers we need to update, the update == 2
   pass does the actual I/O. */
void invalidate_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;

	if (mapping->nrpages == 0)
		return;

	invalidate_bh_lrus();
	invalidate_mapping_pages(mapping, 0, -1);
}
/*
 * Kick pdflush then try to free up some ZONE_NORMAL memory.
 */
static void free_more_memory(void)
{
	struct zone *zone;
	int nid;

	wakeup_pdflush(1024);
	yield();

	for_each_online_node(nid) {
		(void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
						gfp_zone(GFP_NOFS), NULL,
						&zone);
		if (zone)
			try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
						GFP_NOFS);
	}
}
/*
 * I/O completion handler for block_read_full_page() - pages
 * which come unlocked at the end of I/O.
 */
static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct page *page;
	int page_uptodate = 1;

	BUG_ON(!buffer_async_read(bh));

	page = bh->b_page;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		clear_buffer_uptodate(bh);
		if (!quiet_error(bh))
			buffer_io_error(bh);
		SetPageError(page);
	}

	/*
	 * Be _very_ careful from here on. Bad things can happen if
	 * two buffer heads end IO at almost the same time and both
	 * decide that the page is now completely done.
	 */
	first = page_buffers(page);
	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
	clear_buffer_async_read(bh);
	unlock_buffer(bh);
	tmp = bh;
	do {
		if (!buffer_uptodate(tmp))
			page_uptodate = 0;
		if (buffer_async_read(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);

	/*
	 * If none of the buffers had errors and they are all
	 * uptodate then we can set the page uptodate.
	 */
	if (page_uptodate && !PageError(page))
		SetPageUptodate(page);
	unlock_page(page);
	return;

still_busy:
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	return;
}
/*
 * Completion handler for block_write_full_page() - pages which are unlocked
 * during I/O, and which have PageWriteback cleared upon I/O completion.
 */
static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
	char b[BDEVNAME_SIZE];
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct page *page;

	BUG_ON(!buffer_async_write(bh));

	page = bh->b_page;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		if (!quiet_error(bh)) {
			buffer_io_error(bh);
			printk(KERN_WARNING "lost page write due to "
					"I/O error on %s\n",
			       bdevname(bh->b_bdev, b));
		}
		set_bit(AS_EIO, &page->mapping->flags);
		set_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
		SetPageError(page);
	}

	first = page_buffers(page);
	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);

	clear_buffer_async_write(bh);
	unlock_buffer(bh);
	tmp = bh->b_this_page;
	while (tmp != bh) {
		if (buffer_async_write(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	}
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	end_page_writeback(page);
	return;

still_busy:
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	return;
}
/*
 * If a page's buffers are under async readin (end_buffer_async_read
 * completion) then there is a possibility that another thread of
 * control could lock one of the buffers after it has completed
 * but while some of the other buffers have not completed.  This
 * locked buffer would confuse end_buffer_async_read() into not unlocking
 * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
 * that this buffer is not under async I/O.
 *
 * The page comes unlocked when it has no locked buffer_async buffers
 * left.
 *
 * PageLocked prevents anyone from starting new async I/O reads against any
 * of the buffers.
 *
 * PageWriteback is used to prevent simultaneous writeout of the same
 * page.
 *
 * PageLocked prevents anyone from starting writeback of a page which is
 * under read I/O (PageWriteback is only ever set against a locked page).
 */
static void mark_buffer_async_read(struct buffer_head *bh)
{
	bh->b_end_io = end_buffer_async_read;
	set_buffer_async_read(bh);
}

void mark_buffer_async_write(struct buffer_head *bh)
{
	bh->b_end_io = end_buffer_async_write;
	set_buffer_async_write(bh);
}
EXPORT_SYMBOL(mark_buffer_async_write);
/*
 * fs/buffer.c contains helper functions for buffer-backed address space's
 * fsync functions.  A common requirement for buffer-based filesystems is
 * that certain data from the backing blockdev needs to be written out for
 * a successful fsync().  For example, ext2 indirect blocks need to be
 * written back and waited upon before fsync() returns.
 *
 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
 * management of a list of dependent buffers at ->i_mapping->private_list.
 *
 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 * from their controlling inode's queue when they are being freed.  But
 * try_to_free_buffers() will be operating against the *blockdev* mapping
 * at the time, not against the S_ISREG file which depends on those buffers.
 * So the locking for private_list is via the private_lock in the address_space
 * which backs the buffers.  Which is different from the address_space
 * against which the buffers are listed.  So for a particular address_space,
 * mapping->private_lock does *not* protect mapping->private_list!  In fact,
 * mapping->private_list will always be protected by the backing blockdev's
 * ->private_lock.
 *
 * Which introduces a requirement: all buffers on an address_space's
 * ->private_list must be from the same address_space: the blockdev's.
 *
 * address_spaces which do not place buffers at ->private_list via these
 * utility functions are free to use private_lock and private_list for
 * whatever they want.  The only requirement is that list_empty(private_list)
 * be true at clear_inode() time.
 *
 * FIXME: clear_inode should not call invalidate_inode_buffers().  The
 * filesystems should do that.  invalidate_inode_buffers() should just go
 * BUG_ON(!list_empty).
 *
 * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
 * take an address_space, not an inode.  And it should be called
 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
 * dirtied.
 *
 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
 * list if it is already on a list.  Because if the buffer is on a list,
 * it *must* already be on the right one.  If not, the filesystem is being
 * silly.  This will save a ton of locking.  But first we have to ensure
 * that buffers are taken *off* the old inode's list when they are freed
 * (presumably in truncate).  That requires careful auditing of all
 * filesystems (do it inside bforget()).  It could also be done by bringing
 * b_inode back.
 */
/*
 * The buffer's backing address_space's private_lock must be held
 */
static void __remove_assoc_queue(struct buffer_head *bh)
{
	list_del_init(&bh->b_assoc_buffers);
	WARN_ON(!bh->b_assoc_map);
	if (buffer_write_io_error(bh))
		set_bit(AS_EIO, &bh->b_assoc_map->flags);
	bh->b_assoc_map = NULL;
}
int inode_has_buffers(struct inode *inode)
{
	return !list_empty(&inode->i_data.private_list);
}
/*
 * osync is designed to support O_SYNC io.  It waits synchronously for
 * all already-submitted IO to complete, but does not queue any new
 * writes to the disk.
 *
 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
 * you dirty the buffers, and then use osync_inode_buffers to wait for
 * completion.  Any other dirty buffers which are not yet queued for
 * write will not be flushed to disk by the osync.
 */
static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head *p;
	int err = 0;

	spin_lock(lock);
repeat:
	list_for_each_prev(p, list) {
		bh = BH_ENTRY(p);
		if (buffer_locked(bh)) {
			get_bh(bh);
			spin_unlock(lock);
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				err = -EIO;
			brelse(bh);
			spin_lock(lock);
			goto repeat;
		}
	}
	spin_unlock(lock);
	return err;
}
/**
 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
 * @mapping: the mapping which wants those buffers written
 *
 * Starts I/O against the buffers at mapping->private_list, and waits upon
 * that I/O.
 *
 * Basically, this is a convenience function for fsync().
 * @mapping is a file or directory which needs those buffers to be written for
 * a successful fsync().
 */
int sync_mapping_buffers(struct address_space *mapping)
{
	struct address_space *buffer_mapping = mapping->assoc_mapping;

	if (buffer_mapping == NULL || list_empty(&mapping->private_list))
		return 0;

	return fsync_buffers_list(&buffer_mapping->private_lock,
					&mapping->private_list);
}
EXPORT_SYMBOL(sync_mapping_buffers);
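/*
 * Usage sketch (illustrative assumption; the myfs_* names are made up): a
 * simple filesystem's ->fsync can be built almost entirely from this helper:
 *
 *	int myfs_fsync(struct file *file, struct dentry *dentry, int datasync)
 *	{
 *		struct inode *inode = dentry->d_inode;
 *		int err = sync_mapping_buffers(inode->i_mapping);
 *		... also write the inode itself if it is dirty ...
 *		return err;
 *	}
 */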
/*
 * Called when we've recently written block `bblock', and it is known that
 * `bblock' was for a buffer_boundary() buffer.  This means that the block at
 * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
 * dirty, schedule it for IO.  So that indirects merge nicely with their data.
 */
void write_boundary_block(struct block_device *bdev,
			sector_t bblock, unsigned blocksize)
{
	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
	if (bh) {
		if (buffer_dirty(bh))
			ll_rw_block(WRITE, 1, &bh);
		put_bh(bh);
	}
}
void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct address_space *buffer_mapping = bh->b_page->mapping;

	mark_buffer_dirty(bh);
	if (!mapping->assoc_mapping) {
		mapping->assoc_mapping = buffer_mapping;
	} else {
		BUG_ON(mapping->assoc_mapping != buffer_mapping);
	}
	if (!bh->b_assoc_map) {
		spin_lock(&buffer_mapping->private_lock);
		list_move_tail(&bh->b_assoc_buffers,
				&mapping->private_list);
		bh->b_assoc_map = mapping;
		spin_unlock(&buffer_mapping->private_lock);
	}
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);
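/*
 * Usage sketch (illustrative assumption): metadata which a later fsync() of
 * the file must flush is dirtied against the owning inode, e.g. when filling
 * an ext2-style indirect block:
 *
 *	struct buffer_head *bh = sb_getblk(sb, pblock);
 *	... fill in the indirect block ...
 *	mark_buffer_dirty_inode(bh, inode);
 *	brelse(bh);
 *
 * sync_mapping_buffers(inode->i_mapping) will then find it on
 * ->private_list.
 */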
/*
 * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
 * dirty.
 *
 * If warn is true, then emit a warning if the page is not uptodate and has
 * not been truncated.
 */
static int __set_page_dirty(struct page *page,
		struct address_space *mapping, int warn)
{
	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	if (TestSetPageDirty(page))
		return 0;

	spin_lock_irq(&mapping->tree_lock);
	if (page->mapping) {	/* Race with truncate? */
		WARN_ON_ONCE(warn && !PageUptodate(page));

		if (mapping_cap_account_dirty(mapping)) {
			__inc_zone_page_state(page, NR_FILE_DIRTY);
			__inc_bdi_stat(mapping->backing_dev_info,
					BDI_RECLAIMABLE);
			task_io_account_write(PAGE_CACHE_SIZE);
		}
		radix_tree_tag_set(&mapping->page_tree,
				page_index(page), PAGECACHE_TAG_DIRTY);
	}
	spin_unlock_irq(&mapping->tree_lock);
	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	return 1;
}
/*
 * Add a page to the dirty page list.
 *
 * It is a sad fact of life that this function is called from several places
 * deeply under spinlocking.  It may not sleep.
 *
 * If the page has buffers, the uptodate buffers are set dirty, to preserve
 * dirty-state coherency between the page and the buffers.  If the page does
 * not have buffers then when they are later attached they will all be set
 * dirty.
 *
 * The buffers are dirtied before the page is dirtied.  There's a small race
 * window in which a writepage caller may see the page cleanness but not the
 * buffer dirtiness.  That's fine.  If this code were to set the page dirty
 * before the buffers, a concurrent writepage caller could clear the page dirty
 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
 * page on the dirty page list.
 *
 * We use private_lock to lock against try_to_free_buffers while using the
 * page's buffer list.  Also use this to protect against clean buffers being
 * added to the page after it was set dirty.
 *
 * FIXME: may need to call ->reservepage here as well.  That's rather up to the
 * address_space though.
 */
int __set_page_dirty_buffers(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	spin_lock(&mapping->private_lock);
	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		do {
			set_buffer_dirty(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	spin_unlock(&mapping->private_lock);

	return __set_page_dirty(page, mapping, 1);
}
EXPORT_SYMBOL(__set_page_dirty_buffers);
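/*
 * Usage sketch (illustrative assumption): set_page_dirty() falls back to
 * __set_page_dirty_buffers() when an address_space leaves ->set_page_dirty
 * unset, and buffer-backed filesystems may also wire it up explicitly:
 *
 *	static const struct address_space_operations myfs_aops = {
 *		.set_page_dirty	= __set_page_dirty_buffers,
 *		...
 *	};
 */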
/*
 * Write out and wait upon a list of buffers.
 *
 * We have conflicting pressures: we want to make sure that all
 * initially dirty buffers get waited on, but that any subsequently
 * dirtied buffers don't.  After all, we don't want fsync to last
 * forever if somebody is actively writing to the file.
 *
 * Do this in two main stages: first we copy dirty buffers to a
 * temporary inode list, queueing the writes as we go.  Then we clean
 * up, waiting for those writes to complete.
 *
 * During this second stage, any subsequent updates to the file may end
 * up refiling the buffer on the original inode's dirty list again, so
 * there is a chance we will end up with a buffer queued for write but
 * not yet completed on that list.  So, as a final cleanup we go through
 * the osync code to catch these locked, dirty buffers without requeuing
 * any newly dirty buffers for write.
 */
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head tmp;
	struct address_space *mapping;
	int err = 0, err2;

	INIT_LIST_HEAD(&tmp);

	spin_lock(lock);
	while (!list_empty(list)) {
		bh = BH_ENTRY(list->next);
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);
		/* Avoid race with mark_buffer_dirty_inode() which does
		 * a lockless check and we rely on seeing the dirty bit */
		smp_mb();
		if (buffer_dirty(bh) || buffer_locked(bh)) {
			list_add(&bh->b_assoc_buffers, &tmp);
			bh->b_assoc_map = mapping;
			if (buffer_dirty(bh)) {
				get_bh(bh);
				spin_unlock(lock);
				/*
				 * Ensure any pending I/O completes so that
				 * ll_rw_block() actually writes the current
				 * contents - it is a noop if I/O is still in
				 * flight on potentially older contents.
				 */
				ll_rw_block(SWRITE_SYNC, 1, &bh);
				brelse(bh);
				spin_lock(lock);
			}
		}
	}

	while (!list_empty(&tmp)) {
		bh = BH_ENTRY(tmp.prev);
		get_bh(bh);
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);
		/* Avoid race with mark_buffer_dirty_inode() which does
		 * a lockless check and we rely on seeing the dirty bit */
		smp_mb();
		if (buffer_dirty(bh)) {
			list_add(&bh->b_assoc_buffers,
				 &mapping->private_list);
			bh->b_assoc_map = mapping;
		}
		spin_unlock(lock);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			err = -EIO;
		brelse(bh);
		spin_lock(lock);
	}

	spin_unlock(lock);
	err2 = osync_buffers_list(lock, list);
	if (err)
		return err;
	else
		return err2;
}
/*
 * Invalidate any and all dirty buffers on a given inode.  We are
 * probably unmounting the fs, but that doesn't mean we have already
 * done a sync().  Just drop the buffers from the inode list.
 *
 * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
 * assumes that all the buffers are against the blockdev.  Not true
 * for reiserfs.
 */
void invalidate_inode_buffers(struct inode *inode)
{
	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->assoc_mapping;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list))
			__remove_assoc_queue(BH_ENTRY(list->next));
		spin_unlock(&buffer_mapping->private_lock);
	}
}
EXPORT_SYMBOL(invalidate_inode_buffers);
/*
 * Remove any clean buffers from the inode's buffer list.  This is called
 * when we're trying to free the inode itself.  Those buffers can pin it.
 *
 * Returns true if all buffers were removed.
 */
int remove_inode_buffers(struct inode *inode)
{
	int ret = 1;

	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->assoc_mapping;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list)) {
			struct buffer_head *bh = BH_ENTRY(list->next);
			if (buffer_dirty(bh)) {
				ret = 0;
				break;
			}
			__remove_assoc_queue(bh);
		}
		spin_unlock(&buffer_mapping->private_lock);
	}
	return ret;
}
/*
 * Create the appropriate buffers when given a page for data area and
 * the size of each buffer..  Use the bh->b_this_page linked list to
 * follow the buffers created.  Return NULL if unable to create more
 * buffers.
 *
 * The retry flag is used to differentiate async IO (paging, swapping)
 * which may not fail from ordinary buffer allocations.
 */
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
		int retry)
{
	struct buffer_head *bh, *head;
	long offset;

try_again:
	head = NULL;
	offset = PAGE_SIZE;
	while ((offset -= size) >= 0) {
		bh = alloc_buffer_head(GFP_NOFS);
		if (!bh)
			goto no_grow;

		bh->b_bdev = NULL;
		bh->b_this_page = head;
		bh->b_blocknr = -1;
		head = bh;

		bh->b_state = 0;
		atomic_set(&bh->b_count, 0);
		bh->b_private = NULL;
		bh->b_size = size;

		/* Link the buffer to its page */
		set_bh_page(bh, page, offset);

		init_buffer(bh, NULL, NULL);
	}
	return head;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
	if (head) {
		do {
			bh = head;
			head = head->b_this_page;
			free_buffer_head(bh);
		} while (head);
	}

	/*
	 * Return failure for non-async IO requests.  Async IO requests
	 * are not allowed to fail, so we have to wait until buffer heads
	 * become available.  But we don't want tasks sleeping with
	 * partially complete buffers, so all were released above.
	 */
	if (!retry)
		return NULL;

	/* We're _really_ low on memory. Now we just
	 * wait for old buffer heads to become free due to
	 * finishing IO.  Since this is an async request and
	 * the reserve list is empty, we're sure there are
	 * async buffer heads in use.
	 */
	free_more_memory();
	goto try_again;
}
EXPORT_SYMBOL_GPL(alloc_page_buffers);
static inline void
link_dev_buffers(struct page *page, struct buffer_head *head)
{
	struct buffer_head *bh, *tail;

	bh = head;
	do {
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;
	attach_page_buffers(page, head);
}
/*
 * Initialise the state of a blockdev page's buffers.
 */
static void
init_page_buffers(struct page *page, struct block_device *bdev,
			sector_t block, int size)
{
	struct buffer_head *head = page_buffers(page);
	struct buffer_head *bh = head;
	int uptodate = PageUptodate(page);

	do {
		if (!buffer_mapped(bh)) {
			init_buffer(bh, NULL, NULL);
			bh->b_bdev = bdev;
			bh->b_blocknr = block;
			if (uptodate)
				set_buffer_uptodate(bh);
			set_buffer_mapped(bh);
		}
		block++;
		bh = bh->b_this_page;
	} while (bh != head);
}
/*
 * Create the page-cache page that contains the requested block.
 *
 * This is used purely for blockdev mappings.
 */
static struct page *
grow_dev_page(struct block_device *bdev, sector_t block,
		pgoff_t index, int size)
{
	struct inode *inode = bdev->bd_inode;
	struct page *page;
	struct buffer_head *bh;

	page = find_or_create_page(inode->i_mapping, index,
		(mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
	if (!page)
		return NULL;

	BUG_ON(!PageLocked(page));

	if (page_has_buffers(page)) {
		bh = page_buffers(page);
		if (bh->b_size == size) {
			init_page_buffers(page, bdev, block, size);
			return page;
		}
		if (!try_to_free_buffers(page))
			goto failed;
	}

	/*
	 * Allocate some buffers for this page
	 */
	bh = alloc_page_buffers(page, size, 0);
	if (!bh)
		goto failed;

	/*
	 * Link the page to the buffers and initialise them.  Take the
	 * lock to be atomic wrt __find_get_block(), which does not
	 * run under the page lock.
	 */
	spin_lock(&inode->i_mapping->private_lock);
	link_dev_buffers(page, bh);
	init_page_buffers(page, bdev, block, size);
	spin_unlock(&inode->i_mapping->private_lock);
	return page;

failed:
	BUG();
	unlock_page(page);
	page_cache_release(page);
	return NULL;
}
/*
 * Create buffers for the specified block device block's page.  If
 * that page was dirty, the buffers are set dirty also.
 */
static int
grow_buffers(struct block_device *bdev, sector_t block, int size)
{
	struct page *page;
	pgoff_t index;
	int sizebits;

	sizebits = -1;
	do {
		sizebits++;
	} while ((size << sizebits) < PAGE_SIZE);

	index = block >> sizebits;

	/*
	 * Check for a block which wants to lie outside our maximum possible
	 * pagecache index.  (this comparison is done using sector_t types).
	 */
	if (unlikely(index != block >> sizebits)) {
		char b[BDEVNAME_SIZE];

		printk(KERN_ERR "%s: requested out-of-range block %llu for "
			"device %s\n",
			__func__, (unsigned long long)block,
			bdevname(bdev, b));
		return -EIO;
	}
	block = index << sizebits;
	/* Create a page with the proper size buffers.. */
	page = grow_dev_page(bdev, block, index, size);
	if (!page)
		return 0;
	unlock_page(page);
	page_cache_release(page);
	return 1;
}
static struct buffer_head *
__getblk_slow(struct block_device *bdev, sector_t block, int size)
{
	/* Size must be multiple of hard sectorsize */
	if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
			(size < 512 || size > PAGE_SIZE))) {
		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
					size);
		printk(KERN_ERR "hardsect size: %d\n",
					bdev_hardsect_size(bdev));

		dump_stack();
		return NULL;
	}

	for (;;) {
		struct buffer_head * bh;
		int ret;

		bh = __find_get_block(bdev, block, size);
		if (bh)
			return bh;

		ret = grow_buffers(bdev, block, size);
		if (ret < 0)
			return NULL;
		if (ret == 0)
			free_more_memory();
	}
}
/*
 * The relationship between dirty buffers and dirty pages:
 *
 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
 * the page is tagged dirty in its radix tree.
 *
 * At all times, the dirtiness of the buffers represents the dirtiness of
 * subsections of the page.  If the page has buffers, the page dirty bit is
 * merely a hint about the true dirty state.
 *
 * When a page is set dirty in its entirety, all its buffers are marked dirty
 * (if the page has buffers).
 *
 * When a buffer is marked dirty, its page is dirtied, but the page's other
 * buffers are not.
 *
 * Also.  When blockdev buffers are explicitly read with bread(), they
 * individually become uptodate.  But their backing page remains not
 * uptodate - even if all of its buffers are uptodate.  A subsequent
 * block_read_full_page() against that page will discover all the uptodate
 * buffers, will set the page uptodate and will perform no I/O.
 */

/**
 * mark_buffer_dirty - mark a buffer_head as needing writeout
 * @bh: the buffer_head to mark dirty
 *
 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
 * backing page dirty, then tag the page as dirty in its address_space's radix
 * tree and then attach the address_space's inode to its superblock's dirty
 * inode list.
 *
 * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
 * mapping->tree_lock and the global inode_lock.
 */
void mark_buffer_dirty(struct buffer_head *bh)
{
	WARN_ON_ONCE(!buffer_uptodate(bh));

	/*
	 * Very *carefully* optimize the it-is-already-dirty case.
	 *
	 * Don't let the final "is it dirty" escape to before we
	 * perhaps modified the buffer.
	 */
	if (buffer_dirty(bh)) {
		smp_mb();
		if (buffer_dirty(bh))
			return;
	}

	if (!test_set_buffer_dirty(bh))
		__set_page_dirty(bh->b_page, page_mapping(bh->b_page), 0);
}
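/*
 * Usage sketch (illustrative assumption): modifying a metadata block and
 * scheduling it for writeout is simply
 *
 *	struct buffer_head *bh = sb_bread(sb, block);
 *	if (bh) {
 *		memcpy(bh->b_data + off, src, len);
 *		mark_buffer_dirty(bh);
 *		brelse(bh);
 *	}
 */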
/*
 * Decrement a buffer_head's reference count.  If all buffers against a page
 * have zero reference count, are clean and unlocked, and if the page is clean
 * and unlocked then try_to_free_buffers() may strip the buffers from the page
 * in preparation for freeing it (sometimes, rarely, buffers are removed from
 * a page but it ends up not being freed, and buffers may later be reattached).
 */
void __brelse(struct buffer_head * buf)
{
	if (atomic_read(&buf->b_count)) {
		put_bh(buf);
		return;
	}
	WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
}
/*
 * bforget() is like brelse(), except it discards any
 * potentially dirty data.
 */
void __bforget(struct buffer_head *bh)
{
	clear_buffer_dirty(bh);
	if (bh->b_assoc_map) {
		struct address_space *buffer_mapping = bh->b_page->mapping;

		spin_lock(&buffer_mapping->private_lock);
		list_del_init(&bh->b_assoc_buffers);
		bh->b_assoc_map = NULL;
		spin_unlock(&buffer_mapping->private_lock);
	}
	__brelse(bh);
}
static struct buffer_head *__bread_slow(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return bh;
	} else {
		get_bh(bh);
		bh->b_end_io = end_buffer_read_sync;
		submit_bh(READ, bh);
		wait_on_buffer(bh);
		if (buffer_uptodate(bh))
			return bh;
	}
	brelse(bh);
	return NULL;
}
/*
 * Per-cpu buffer LRU implementation.  To reduce the cost of __find_get_block().
 * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
 * refcount elevated by one when they're in an LRU.  A buffer can only appear
 * once in a particular CPU's LRU.  A single buffer can be present in multiple
 * CPU's LRUs at the same time.
 *
 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
 * sb_find_get_block().
 *
 * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
 * a local interrupt disable for that.
 */

#define BH_LRU_SIZE	8

struct bh_lru {
	struct buffer_head *bhs[BH_LRU_SIZE];
};

static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};

#ifdef CONFIG_SMP
#define bh_lru_lock()	local_irq_disable()
#define bh_lru_unlock()	local_irq_enable()
#else
#define bh_lru_lock()	preempt_disable()
#define bh_lru_unlock()	preempt_enable()
#endif

static inline void check_irqs_on(void)
{
#ifdef irqs_disabled
	BUG_ON(irqs_disabled());
#endif
}
/*
 * The LRU management algorithm is dopey-but-simple.  Sorry.
 */
static void bh_lru_install(struct buffer_head *bh)
{
	struct buffer_head *evictee = NULL;
	struct bh_lru *lru;

	check_irqs_on();
	bh_lru_lock();
	lru = &__get_cpu_var(bh_lrus);
	if (lru->bhs[0] != bh) {
		struct buffer_head *bhs[BH_LRU_SIZE];
		int in;
		int out = 0;

		get_bh(bh);
		bhs[out++] = bh;
		for (in = 0; in < BH_LRU_SIZE; in++) {
			struct buffer_head *bh2 = lru->bhs[in];

			if (bh2 == bh) {
				__brelse(bh2);
			} else {
				if (out >= BH_LRU_SIZE) {
					BUG_ON(evictee != NULL);
					evictee = bh2;
				} else {
					bhs[out++] = bh2;
				}
			}
		}
		while (out < BH_LRU_SIZE)
			bhs[out++] = NULL;
		memcpy(lru->bhs, bhs, sizeof(bhs));
	}
	bh_lru_unlock();

	if (evictee)
		__brelse(evictee);
}
/*
 * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
 */
static struct buffer_head *
lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *ret = NULL;
	struct bh_lru *lru;
	unsigned int i;

	check_irqs_on();
	bh_lru_lock();
	lru = &__get_cpu_var(bh_lrus);
	for (i = 0; i < BH_LRU_SIZE; i++) {
		struct buffer_head *bh = lru->bhs[i];

		if (bh && bh->b_bdev == bdev &&
				bh->b_blocknr == block && bh->b_size == size) {
			if (i) {
				while (i) {
					lru->bhs[i] = lru->bhs[i - 1];
					i--;
				}
				lru->bhs[0] = bh;
			}
			get_bh(bh);
			ret = bh;
			break;
		}
	}
	bh_lru_unlock();
	return ret;
}
/*
 * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
 * it in the LRU and mark it as accessed.  If it is not present then return
 * NULL
 */
struct buffer_head *
__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);

	if (bh == NULL) {
		bh = __find_get_block_slow(bdev, block);
		if (bh)
			bh_lru_install(bh);
	}
	if (bh)
		touch_buffer(bh);
	return bh;
}
EXPORT_SYMBOL(__find_get_block);
/*
 * __getblk will locate (and, if necessary, create) the buffer_head
 * which corresponds to the passed block_device, block and size. The
 * returned buffer has its reference count incremented.
 *
 * __getblk() cannot fail - it just keeps trying.  If you pass it an
 * illegal block number, __getblk() will happily return a buffer_head
 * which represents the non-existent block.  Very weird.
 *
 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
 * attempt is failing.  FIXME, perhaps?
 */
struct buffer_head *
__getblk(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = __find_get_block(bdev, block, size);

	might_sleep();
	if (bh == NULL)
		bh = __getblk_slow(bdev, block, size);
	return bh;
}
EXPORT_SYMBOL(__getblk);
/*
 * Do async read-ahead on a buffer..
 */
void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = __getblk(bdev, block, size);
	if (likely(bh)) {
		ll_rw_block(READA, 1, &bh);
		brelse(bh);
	}
}
EXPORT_SYMBOL(__breadahead);
/**
 *  __bread() - reads a specified block and returns the bh
 *  @bdev: the block_device to read from
 *  @block: number of block
 *  @size: size (in bytes) to read
 *
 *  Reads a specified block, and returns buffer head that contains it.
 *  It returns NULL if the block was unreadable.
 */
struct buffer_head *
__bread(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = __getblk(bdev, block, size);

	if (likely(bh) && !buffer_uptodate(bh))
		bh = __bread_slow(bh);
	return bh;
}
EXPORT_SYMBOL(__bread);
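/*
 * Usage sketch (illustrative assumption): filesystems usually go through
 * the sb_* wrappers in <linux/buffer_head.h>, which feed sb->s_bdev and
 * sb->s_blocksize into the functions above:
 *
 *	struct buffer_head *bh = sb_bread(sb, block);
 *	if (!bh)
 *		return -EIO;
 *	... use bh->b_data ...
 *	brelse(bh);
 */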
/*
 * invalidate_bh_lrus() is called rarely - but not only at unmount.
 * This doesn't race because it runs in each cpu either in irq
 * or with preempt disabled.
 */
static void invalidate_bh_lru(void *arg)
{
	struct bh_lru *b = &get_cpu_var(bh_lrus);
	int i;

	for (i = 0; i < BH_LRU_SIZE; i++) {
		brelse(b->bhs[i]);
		b->bhs[i] = NULL;
	}
	put_cpu_var(bh_lrus);
}

void invalidate_bh_lrus(void)
{
	on_each_cpu(invalidate_bh_lru, NULL, 1);
}
EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
void set_bh_page(struct buffer_head *bh,
		struct page *page, unsigned long offset)
{
	bh->b_page = page;
	BUG_ON(offset >= PAGE_SIZE);
	if (PageHighMem(page))
		/*
		 * This catches illegal uses and preserves the offset:
		 */
		bh->b_data = (char *)(0 + offset);
	else
		bh->b_data = page_address(page) + offset;
}
EXPORT_SYMBOL(set_bh_page);
/*
 * Called when truncating a buffer on a page completely.
 */
static void discard_buffer(struct buffer_head * bh)
{
	lock_buffer(bh);
	clear_buffer_dirty(bh);
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
	unlock_buffer(bh);
}
/**
 * block_invalidatepage - invalidate part or all of a buffer-backed page
 *
 * @page: the page which is affected
 * @offset: the index of the truncation point
 *
 * block_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * block_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point.  Because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void block_invalidatepage(struct page *page, unsigned long offset)
{
	struct buffer_head *head, *bh, *next;
	unsigned int curr_off = 0;

	BUG_ON(!PageLocked(page));
	if (!page_has_buffers(page))
		goto out;

	head = page_buffers(page);
	bh = head;
	do {
		unsigned int next_off = curr_off + bh->b_size;
		next = bh->b_this_page;

		/*
		 * is this block fully invalidated?
		 */
		if (offset <= curr_off)
			discard_buffer(bh);
		curr_off = next_off;
		bh = next;
	} while (bh != head);

	/*
	 * We release buffers only if the entire page is being invalidated.
	 * The get_block cached value has been unconditionally invalidated,
	 * so real IO is not possible anymore.
	 */
	if (offset == 0)
		try_to_release_page(page, 0);
out:
	return;
}
EXPORT_SYMBOL(block_invalidatepage);
/*
 * We attach and possibly dirty the buffers atomically wrt
 * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
 * is already excluded via the page lock.
 */
void create_empty_buffers(struct page *page,
			unsigned long blocksize, unsigned long b_state)
{
	struct buffer_head *bh, *head, *tail;

	head = alloc_page_buffers(page, blocksize, 1);
	bh = head;
	do {
		bh->b_state |= b_state;
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;

	spin_lock(&page->mapping->private_lock);
	if (PageUptodate(page) || PageDirty(page)) {
		bh = head;
		do {
			if (PageDirty(page))
				set_buffer_dirty(bh);
			if (PageUptodate(page))
				set_buffer_uptodate(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	attach_page_buffers(page, head);
	spin_unlock(&page->mapping->private_lock);
}
EXPORT_SYMBOL(create_empty_buffers);
/*
 * We are taking a block for data and we don't want any output from any
 * buffer-cache aliases starting from return from that function and
 * until the moment when something will explicitly mark the buffer
 * dirty (hopefully that will not happen until we will free that block ;-)
 * We don't even need to mark it not-uptodate - nobody can expect
 * anything from a newly allocated buffer anyway. We used to use
 * unmap_buffer() for such invalidation, but that was wrong. We definitely
 * don't want to mark the alias unmapped, for example - it would confuse
 * anyone who might pick it with bread() afterwards...
 *
 * Also..  Note that bforget() doesn't lock the buffer.  So there can
 * be writeout I/O going on against recently-freed buffers.  We don't
 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
 * only if we really need to.  That happens here.
 */
void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
{
	struct buffer_head *old_bh;

	might_sleep();

	old_bh = __find_get_block_slow(bdev, block);
	if (old_bh) {
		clear_buffer_dirty(old_bh);
		wait_on_buffer(old_bh);
		clear_buffer_req(old_bh);
		__brelse(old_bh);
	}
}
EXPORT_SYMBOL(unmap_underlying_metadata);
/*
 * NOTE! All mapped/uptodate combinations are valid:
 *
 *	Mapped	Uptodate	Meaning
 *
 *	No	No		"unknown" - must do get_block()
 *	No	Yes		"hole" - zero-filled
 *	Yes	No		"allocated" - allocated on disk, not read in
 *	Yes	Yes		"valid" - allocated and up-to-date in memory.
 *
 * "Dirty" is valid only with the last case (mapped+uptodate).
 */

/*
 * While block_write_full_page is writing back the dirty buffers under
 * the page lock, whoever dirtied the buffers may decide to clean them
 * again at any time.  We handle that by only looking at the buffer
 * state inside lock_buffer().
 *
 * If block_write_full_page() is called for regular writeback
 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
 * locked buffer.   This only can happen if someone has written the buffer
 * directly, with submit_bh().  At the address_space level PageWriteback
 * prevents this contention from occurring.
 */
static int __block_write_full_page(struct inode *inode, struct page *page,
			get_block_t *get_block, struct writeback_control *wbc)
{
	int err;
	sector_t block;
	sector_t last_block;
	struct buffer_head *bh, *head;
	const unsigned blocksize = 1 << inode->i_blkbits;
	int nr_underway = 0;

	BUG_ON(!PageLocked(page));

	last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;

	if (!page_has_buffers(page)) {
		create_empty_buffers(page, blocksize,
					(1 << BH_Dirty)|(1 << BH_Uptodate));
	}

	/*
	 * Be very careful.  We have no exclusion from __set_page_dirty_buffers
	 * here, and the (potentially unmapped) buffers may become dirty at
	 * any time.  If a buffer becomes dirty here after we've inspected it
	 * then we just miss that fact, and the page stays dirty.
	 *
	 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
	 * handle that here by just cleaning them.
	 */

	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
	head = page_buffers(page);
	bh = head;

	/*
	 * Get all the dirty buffers mapped to disk addresses and
	 * handle any aliases from the underlying blockdev's mapping.
	 */
	do {
		if (block > last_block) {
			/*
			 * mapped buffers outside i_size will occur, because
			 * this page can be outside i_size when there is a
			 * truncate in progress.
			 */
			/*
			 * The buffer was zeroed by block_write_full_page()
			 */
			clear_buffer_dirty(bh);
			set_buffer_uptodate(bh);
		} else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
			   buffer_dirty(bh)) {
			WARN_ON(bh->b_size != blocksize);
			err = get_block(inode, block, bh, 1);
			if (err)
				goto recover;
			clear_buffer_delay(bh);
			if (buffer_new(bh)) {
				/* blockdev mappings never come here */
				clear_buffer_new(bh);
				unmap_underlying_metadata(bh->b_bdev,
							bh->b_blocknr);
			}
		}
		bh = bh->b_this_page;
		block++;
	} while (bh != head);

	do {
		if (!buffer_mapped(bh))
			continue;
		/*
		 * If it's a fully non-blocking write attempt and we cannot
		 * lock the buffer then redirty the page.  Note that this can
		 * potentially cause a busy-wait loop from pdflush and kswapd
		 * activity, but those code paths have their own higher-level
		 * throttling.
		 */
		if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
			lock_buffer(bh);
		} else if (!trylock_buffer(bh)) {
			redirty_page_for_writepage(wbc, page);
			continue;
		}
		if (test_clear_buffer_dirty(bh)) {
			mark_buffer_async_write(bh);
		} else {
			unlock_buffer(bh);
		}
	} while ((bh = bh->b_this_page) != head);

	/*
	 * The page and its buffers are protected by PageWriteback(), so we can
	 * drop the bh refcounts early.
	 */
	BUG_ON(PageWriteback(page));
	set_page_writeback(page);

	do {
		struct buffer_head *next = bh->b_this_page;
		if (buffer_async_write(bh)) {
			submit_bh(WRITE, bh);
			nr_underway++;
		}
		bh = next;
	} while (bh != head);
	unlock_page(page);

	err = 0;
done:
	if (nr_underway == 0) {
		/*
		 * The page was marked dirty, but the buffers were
		 * clean.  Someone wrote them back by hand with
		 * ll_rw_block/submit_bh.  A rare case.
		 */
		end_page_writeback(page);

		/*
		 * The page and buffer_heads can be released at any time from
		 * here on.
		 */
	}
	return err;

recover:
	/*
	 * ENOSPC, or some other error.  We may already have added some
	 * blocks to the file, so we need to write these out to avoid
	 * exposing stale data.
	 * The page is currently locked and not marked for writeback
	 */
	bh = head;
	/* Recovery: lock and submit the mapped buffers */
	do {
		if (buffer_mapped(bh) && buffer_dirty(bh) &&
		    !buffer_delay(bh)) {
			lock_buffer(bh);
			mark_buffer_async_write(bh);
		} else {
			/*
			 * The buffer may have been set dirty during
			 * attachment to a dirty page.
			 */
			clear_buffer_dirty(bh);
		}
	} while ((bh = bh->b_this_page) != head);
	SetPageError(page);
	BUG_ON(PageWriteback(page));
	mapping_set_error(page->mapping, err);
	set_page_writeback(page);
	do {
		struct buffer_head *next = bh->b_this_page;
		if (buffer_async_write(bh)) {
			clear_buffer_dirty(bh);
			submit_bh(WRITE, bh);
			nr_underway++;
		}
		bh = next;
	} while (bh != head);
	unlock_page(page);
	goto done;
}
/*
 * If a page has any new buffers, zero them out here, and mark them uptodate
 * and dirty so they'll be written out (in order to prevent uninitialised
 * block data from leaking). And clear the new bit.
 */
void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
{
	unsigned int block_start, block_end;
	struct buffer_head *head, *bh;

	BUG_ON(!PageLocked(page));
	if (!page_has_buffers(page))
		return;

	bh = head = page_buffers(page);
	block_start = 0;
	do {
		block_end = block_start + bh->b_size;

		if (buffer_new(bh)) {
			if (block_end > from && block_start < to) {
				if (!PageUptodate(page)) {
					unsigned start, size;

					start = max(from, block_start);
					size = min(to, block_end) - start;

					zero_user(page, start, size);
					set_buffer_uptodate(bh);
				}

				clear_buffer_new(bh);
				mark_buffer_dirty(bh);
			}
		}

		block_start = block_end;
		bh = bh->b_this_page;
	} while (bh != head);
}
EXPORT_SYMBOL(page_zero_new_buffers);
static int __block_prepare_write(struct inode *inode, struct page *page,
		unsigned from, unsigned to, get_block_t *get_block)
{
	unsigned block_start, block_end;
	sector_t block;
	int err = 0;
	unsigned blocksize, bbits;
	struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;

	BUG_ON(!PageLocked(page));
	BUG_ON(from > PAGE_CACHE_SIZE);
	BUG_ON(to > PAGE_CACHE_SIZE);
	BUG_ON(from > to);

	blocksize = 1 << inode->i_blkbits;
	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);
	head = page_buffers(page);

	bbits = inode->i_blkbits;
	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);

	for(bh = head, block_start = 0; bh != head || !block_start;
	    block++, block_start=block_end, bh = bh->b_this_page) {
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (PageUptodate(page)) {
				if (!buffer_uptodate(bh))
					set_buffer_uptodate(bh);
			}
			continue;
		}
		if (buffer_new(bh))
			clear_buffer_new(bh);
		if (!buffer_mapped(bh)) {
			WARN_ON(bh->b_size != blocksize);
			err = get_block(inode, block, bh, 1);
			if (err)
				break;
			if (buffer_new(bh)) {
				unmap_underlying_metadata(bh->b_bdev,
							bh->b_blocknr);
				if (PageUptodate(page)) {
					clear_buffer_new(bh);
					set_buffer_uptodate(bh);
					mark_buffer_dirty(bh);
					continue;
				}
				if (block_end > to || block_start < from)
					zero_user_segments(page,
						to, block_end,
						block_start, from);
				continue;
			}
		}
		if (PageUptodate(page)) {
			if (!buffer_uptodate(bh))
				set_buffer_uptodate(bh);
			continue;
		}
		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
		    !buffer_unwritten(bh) &&
		     (block_start < from || block_end > to)) {
			ll_rw_block(READ, 1, &bh);
			*wait_bh++=bh;
		}
	}
	/*
	 * If we issued read requests - let them complete.
	 */
	while(wait_bh > wait) {
		wait_on_buffer(*--wait_bh);
		if (!buffer_uptodate(*wait_bh))
			err = -EIO;
	}
	if (unlikely(err))
		page_zero_new_buffers(page, from, to);
	return err;
}
static int __block_commit_write(struct inode *inode, struct page *page,
		unsigned from, unsigned to)
{
	unsigned block_start, block_end;
	int partial = 0;
	unsigned blocksize;
	struct buffer_head *bh, *head;

	blocksize = 1 << inode->i_blkbits;

	for(bh = head = page_buffers(page), block_start = 0;
	    bh != head || !block_start;
	    block_start=block_end, bh = bh->b_this_page) {
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (!buffer_uptodate(bh))
				partial = 1;
		} else {
			set_buffer_uptodate(bh);
			mark_buffer_dirty(bh);
		}
		clear_buffer_new(bh);
	}

	/*
	 * If this is a partial write which happened to make all buffers
	 * uptodate then we can optimize away a bogus readpage() for
	 * the next read(). Here we 'discover' whether the page went
	 * uptodate as a result of this (potentially partial) write.
	 */
	if (!partial)
		SetPageUptodate(page);
	return 0;
}
/*
 * block_write_begin takes care of the basic task of block allocation and
 * bringing partial write blocks uptodate first.
 *
 * If *pagep is not NULL, then block_write_begin uses the locked page
 * at *pagep rather than allocating its own. In this case, the page will
 * not be unlocked or deallocated on failure.
 */
int block_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata,
			get_block_t *get_block)
{
	struct inode *inode = mapping->host;
	int status = 0;
	struct page *page;
	pgoff_t index;
	unsigned start, end;
	int ownpage = 0;

	index = pos >> PAGE_CACHE_SHIFT;
	start = pos & (PAGE_CACHE_SIZE - 1);
	end = start + len;

	page = *pagep;
	if (page == NULL) {
		ownpage = 1;
		page = grab_cache_page_write_begin(mapping, index, flags);
		if (!page) {
			status = -ENOMEM;
			goto out;
		}
		*pagep = page;
	} else
		BUG_ON(!PageLocked(page));

	status = __block_prepare_write(inode, page, start, end, get_block);
	if (unlikely(status)) {
		ClearPageUptodate(page);

		if (ownpage) {
			unlock_page(page);
			page_cache_release(page);
			*pagep = NULL;

			/*
			 * prepare_write() may have instantiated a few blocks
			 * outside i_size.  Trim these off again. Don't need
			 * i_size_read because we hold i_mutex.
			 */
			if (pos + len > inode->i_size)
				vmtruncate(inode, inode->i_size);
		}
	}

out:
	return status;
}
EXPORT_SYMBOL(block_write_begin);
int block_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	unsigned start;

	start = pos & (PAGE_CACHE_SIZE - 1);

	if (unlikely(copied < len)) {
		/*
		 * The buffers that were written will now be uptodate, so we
		 * don't have to worry about a readpage reading them and
		 * overwriting a partial write. However if we have encountered
		 * a short write and only partially written into a buffer, it
		 * will not be marked uptodate, so a readpage might come in and
		 * destroy our partial write.
		 *
		 * Do the simplest thing, and just treat any short write to a
		 * non uptodate page as a zero-length write, and force the
		 * caller to redo the whole thing.
		 */
		if (!PageUptodate(page))
			copied = 0;

		page_zero_new_buffers(page, start+copied, start+len);
	}
	flush_dcache_page(page);

	/* This could be a short (even 0-length) commit */
	__block_commit_write(inode, page, start, start+copied);

	return copied;
}
EXPORT_SYMBOL(block_write_end);
int generic_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	int i_size_changed = 0;

	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);

	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold i_mutex.
	 *
	 * But it's important to update i_size while still holding page lock:
	 * page writeout could otherwise come in and zero beyond i_size.
	 */
	if (pos+copied > inode->i_size) {
		i_size_write(inode, pos+copied);
		i_size_changed = 1;
	}

	unlock_page(page);
	page_cache_release(page);

	/*
	 * Don't mark the inode dirty under page lock. First, it unnecessarily
	 * makes the holding time of page lock longer. Second, it forces lock
	 * ordering of page lock and transaction start for journaling
	 * filesystems.
	 */
	if (i_size_changed)
		mark_inode_dirty(inode);

	return copied;
}
EXPORT_SYMBOL(generic_write_end);
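/*
 * Usage sketch (illustrative assumption; the myfs_* names are made up): a
 * filesystem built on these helpers passes its own get_block routine to
 * block_write_begin() and pairs it with generic_write_end() as ->write_end:
 *
 *	static int myfs_write_begin(struct file *file,
 *			struct address_space *mapping,
 *			loff_t pos, unsigned len, unsigned flags,
 *			struct page **pagep, void **fsdata)
 *	{
 *		*pagep = NULL;
 *		return block_write_begin(file, mapping, pos, len, flags,
 *					 pagep, fsdata, myfs_get_block);
 *	}
 */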
/*
 * block_is_partially_uptodate checks whether buffers within a page are
 * uptodate or not.
 *
 * Returns true if all buffers which correspond to a file portion
 * we want to read are uptodate.
 */
int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
					unsigned long from)
{
	struct inode *inode = page->mapping->host;
	unsigned block_start, block_end, blocksize;
	unsigned to;
	struct buffer_head *bh, *head;
	int ret = 1;

	if (!page_has_buffers(page))
		return 0;

	blocksize = 1 << inode->i_blkbits;
	to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count);
	to = from + to;
	if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
		return 0;

	head = page_buffers(page);
	bh = head;
	block_start = 0;
	do {
		block_end = block_start + blocksize;
		if (block_end > from && block_start < to) {
			if (!buffer_uptodate(bh)) {
				ret = 0;
				break;
			}
			if (block_end >= to)
				break;
		}
		block_start = block_end;
		bh = bh->b_this_page;
	} while (bh != head);

	return ret;
}
EXPORT_SYMBOL(block_is_partially_uptodate);
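/*
 * Usage sketch (illustrative assumption): this is meant to be wired
 * straight into the aops table, as ext2/ext3 do:
 *
 *	.is_partially_uptodate	= block_is_partially_uptodate,
 */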
/*
 * Generic "read page" function for block devices that have the normal
 * get_block functionality. This is most of the block device filesystems.
 * Reads the page asynchronously --- the unlock_buffer() and
 * set/clear_buffer_uptodate() functions propagate buffer state into the
 * page struct once IO has completed.
 */
int block_read_full_page(struct page *page, get_block_t *get_block)
{
	struct inode *inode = page->mapping->host;
	sector_t iblock, lblock;
	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
	unsigned int blocksize;
	int nr, i;
	int fully_mapped = 1;

	BUG_ON(!PageLocked(page));
	blocksize = 1 << inode->i_blkbits;
	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);
	head = page_buffers(page);

	iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
	lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
	bh = head;
	nr = 0;
	i = 0;

	do {
		if (buffer_uptodate(bh))
			continue;

		if (!buffer_mapped(bh)) {
			int err = 0;

			fully_mapped = 0;
			if (iblock < lblock) {
				WARN_ON(bh->b_size != blocksize);
				err = get_block(inode, iblock, bh, 0);
				if (err)
					SetPageError(page);
			}
			if (!buffer_mapped(bh)) {
				zero_user(page, i * blocksize, blocksize);
				if (!err)
					set_buffer_uptodate(bh);
				continue;
			}
			/*
			 * get_block() might have updated the buffer
			 * synchronously
			 */
			if (buffer_uptodate(bh))
				continue;
		}
		arr[nr++] = bh;
	} while (i++, iblock++, (bh = bh->b_this_page) != head);

	if (fully_mapped)
		SetPageMappedToDisk(page);

	if (!nr) {
		/*
		 * All buffers are uptodate - we can set the page uptodate
		 * as well. But not if get_block() returned an error.
		 */
		if (!PageError(page))
			SetPageUptodate(page);
		unlock_page(page);
		return 0;
	}

	/* Stage two: lock the buffers */
	for (i = 0; i < nr; i++) {
		bh = arr[i];
		lock_buffer(bh);
		mark_buffer_async_read(bh);
	}

	/*
	 * Stage 3: start the IO.  Check for uptodateness
	 * inside the buffer lock in case another process reading
	 * the underlying blockdev brought it uptodate (the sct fix).
	 */
	for (i = 0; i < nr; i++) {
		bh = arr[i];
		if (buffer_uptodate(bh))
			end_buffer_async_read(bh, 1);
		else
			submit_bh(READ, bh);
	}
	return 0;
}
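/*
 * Example (sketch): most buffer-backed filesystems implement ->readpage
 * as a one-line wrapper that supplies their own get_block callback; the
 * "myfs" names are hypothetical:
 *
 *	static int myfs_readpage(struct file *file, struct page *page)
 *	{
 *		return block_read_full_page(page, myfs_get_block);
 *	}
 */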
/* utility function for filesystems that need to do work on expanding
 * truncates.  Uses filesystem pagecache writes to allow the filesystem to
 * deal with the hole.
 */
int generic_cont_expand_simple(struct inode *inode, loff_t size)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	void *fsdata;
	unsigned long limit;
	int err;

	err = -EFBIG;
	limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
	if (limit != RLIM_INFINITY && size > (loff_t)limit) {
		send_sig(SIGXFSZ, current, 0);
		goto out;
	}
	if (size > inode->i_sb->s_maxbytes)
		goto out;

	err = pagecache_write_begin(NULL, mapping, size, 0,
				AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
				&page, &fsdata);
	if (err)
		goto out;

	err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
	BUG_ON(err > 0);

out:
	return err;
}
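/*
 * Example (sketch): an expanding truncate in a filesystem's ->setattr
 * might use this helper before committing the new size; hedged sketch,
 * error handling abbreviated:
 *
 *	if ((attr->ia_valid & ATTR_SIZE) &&
 *	    attr->ia_size > i_size_read(inode)) {
 *		error = generic_cont_expand_simple(inode, attr->ia_size);
 *		if (error)
 *			return error;
 *	}
 */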
static int cont_expand_zero(struct file *file, struct address_space *mapping,
			loff_t pos, loff_t *bytes)
{
	struct inode *inode = mapping->host;
	unsigned blocksize = 1 << inode->i_blkbits;
	struct page *page;
	void *fsdata;
	pgoff_t index, curidx;
	loff_t curpos;
	unsigned zerofrom, offset, len;
	int err = 0;

	index = pos >> PAGE_CACHE_SHIFT;
	offset = pos & ~PAGE_CACHE_MASK;

	while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
		zerofrom = curpos & ~PAGE_CACHE_MASK;
		if (zerofrom & (blocksize-1)) {
			*bytes |= (blocksize-1);
			(*bytes)++;
		}
		len = PAGE_CACHE_SIZE - zerofrom;

		err = pagecache_write_begin(file, mapping, curpos, len,
						AOP_FLAG_UNINTERRUPTIBLE,
						&page, &fsdata);
		if (err)
			goto out;
		zero_user(page, zerofrom, len);
		err = pagecache_write_end(file, mapping, curpos, len, len,
						page, fsdata);
		if (err < 0)
			goto out;
		BUG_ON(err != len);
		err = 0;

		balance_dirty_pages_ratelimited(mapping);
	}

	/* page covers the boundary, find the boundary offset */
	if (index == curidx) {
		zerofrom = curpos & ~PAGE_CACHE_MASK;
		/* if we will expand the thing, the last block will be filled */
		if (offset <= zerofrom) {
			goto out;
		}
		if (zerofrom & (blocksize-1)) {
			*bytes |= (blocksize-1);
			(*bytes)++;
		}
		len = offset - zerofrom;

		err = pagecache_write_begin(file, mapping, curpos, len,
						AOP_FLAG_UNINTERRUPTIBLE,
						&page, &fsdata);
		if (err)
			goto out;
		zero_user(page, zerofrom, len);
		err = pagecache_write_end(file, mapping, curpos, len, len,
						page, fsdata);
		if (err < 0)
			goto out;
		BUG_ON(err != len);
		err = 0;
	}
out:
	return err;
}
/*
 * For moronic filesystems that do not allow holes in files.
 * We may have to extend the file.
 */
int cont_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata,
			get_block_t *get_block, loff_t *bytes)
{
	struct inode *inode = mapping->host;
	unsigned blocksize = 1 << inode->i_blkbits;
	unsigned zerofrom;
	int err;

	err = cont_expand_zero(file, mapping, pos, bytes);
	if (err)
		goto out;

	zerofrom = *bytes & ~PAGE_CACHE_MASK;
	if (pos+len > *bytes && zerofrom & (blocksize-1)) {
		*bytes |= (blocksize-1);
		(*bytes)++;
	}

	*pagep = NULL;
	err = block_write_begin(file, mapping, pos, len,
				flags, pagep, fsdata, get_block);
out:
	return err;
}
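/*
 * Example (sketch): a no-holes filesystem wires cont_write_begin() into
 * its ->write_begin, passing a per-inode high-water mark recording how
 * far the file has been zeroed out so far.  The "myfs_i()->mmu_private"
 * field below is hypothetical, named after the convention such
 * filesystems tend to use:
 *
 *	static int myfs_write_begin(struct file *file,
 *			struct address_space *mapping, loff_t pos,
 *			unsigned len, unsigned flags,
 *			struct page **pagep, void **fsdata)
 *	{
 *		return cont_write_begin(file, mapping, pos, len, flags,
 *					pagep, fsdata, myfs_get_block,
 *					&myfs_i(mapping->host)->mmu_private);
 *	}
 */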
int block_prepare_write(struct page *page, unsigned from, unsigned to,
			get_block_t *get_block)
{
	struct inode *inode = page->mapping->host;
	int err = __block_prepare_write(inode, page, from, to, get_block);
	if (err)
		ClearPageUptodate(page);
	return err;
}

int block_commit_write(struct page *page, unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;
	__block_commit_write(inode, page, from, to);
	return 0;
}
/*
 * block_page_mkwrite() is not allowed to change the file size as it gets
 * called from a page fault handler when a page is first dirtied. Hence we must
 * be careful to check for EOF conditions here. We set the page up correctly
 * for a written page which means we get ENOSPC checking when writing into
 * holes and correct delalloc and unwritten extent mapping on filesystems that
 * support these features.
 *
 * We are not allowed to take the i_mutex here so we have to play games to
 * protect against truncate races as the page could now be beyond EOF.  Because
 * vmtruncate() writes the inode size before removing pages, once we have the
 * page lock we can determine safely if the page is beyond EOF. If it is not
 * beyond EOF, then the page is guaranteed safe against truncation until we
 * unlock the page.
 */
int
block_page_mkwrite(struct vm_area_struct *vma, struct page *page,
		   get_block_t get_block)
{
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	unsigned long end;
	loff_t size;
	int ret = -EINVAL;

	lock_page(page);
	size = i_size_read(inode);
	if ((page->mapping != inode->i_mapping) ||
	    (page_offset(page) > size)) {
		/* page got truncated out from underneath us */
		goto out_unlock;
	}

	/* page is wholly or partially inside EOF */
	if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
		end = size & ~PAGE_CACHE_MASK;
	else
		end = PAGE_CACHE_SIZE;

	ret = block_prepare_write(page, 0, end, get_block);
	if (!ret)
		ret = block_commit_write(page, 0, end);

out_unlock:
	unlock_page(page);
	return ret;
}
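/*
 * Example (sketch): filesystems typically expose this through their
 * vm_operations_struct; the "myfs" wrapper is hypothetical and assumes
 * the (vma, page) ->page_mkwrite signature used by this kernel:
 *
 *	static int myfs_page_mkwrite(struct vm_area_struct *vma,
 *				     struct page *page)
 *	{
 *		return block_page_mkwrite(vma, page, myfs_get_block);
 *	}
 */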
/*
 * nobh_write_begin()'s prereads are special: the buffer_heads are freed
 * immediately, while under the page lock.  So it needs a special end_io
 * handler which does not touch the bh after unlocking it.
 */
static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
{
	__end_buffer_read_notouch(bh, uptodate);
}
/*
 * Attach the singly-linked list of buffers created by nobh_write_begin, to
 * the page (converting it to circular linked list and taking care of page
 * dirty races).
 */
static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
{
	struct buffer_head *bh;

	BUG_ON(!PageLocked(page));

	spin_lock(&page->mapping->private_lock);
	bh = head;
	do {
		if (PageDirty(page))
			set_buffer_dirty(bh);
		if (!bh->b_this_page)
			bh->b_this_page = head;
		bh = bh->b_this_page;
	} while (bh != head);
	attach_page_buffers(page, head);
	spin_unlock(&page->mapping->private_lock);
}
/*
 * On entry, the page is fully not uptodate.
 * On exit the page is fully uptodate in the areas outside (from,to)
 */
int nobh_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata,
			get_block_t *get_block)
{
	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocksize = 1 << blkbits;
	struct buffer_head *head, *bh;
	struct page *page;
	pgoff_t index;
	unsigned from, to;
	unsigned block_in_page;
	unsigned block_start, block_end;
	sector_t block_in_file;
	int nr_reads = 0;
	int ret = 0;
	int is_mapped_to_disk = 1;

	index = pos >> PAGE_CACHE_SHIFT;
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	*pagep = page;
	*fsdata = NULL;

	if (page_has_buffers(page)) {
		unlock_page(page);
		page_cache_release(page);
		*pagep = NULL;
		return block_write_begin(file, mapping, pos, len, flags, pagep,
					fsdata, get_block);
	}

	if (PageMappedToDisk(page))
		return 0;

	/*
	 * Allocate buffers so that we can keep track of state, and potentially
	 * attach them to the page if an error occurs. In the common case of
	 * no error, they will just be freed again without ever being attached
	 * to the page (which is all OK, because we're under the page lock).
	 *
	 * Be careful: the buffer linked list is a NULL terminated one, rather
	 * than the circular one we're used to.
	 */
	head = alloc_page_buffers(page, blocksize, 0);
	if (!head) {
		ret = -ENOMEM;
		goto out_release;
	}

	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);

	/*
	 * We loop across all blocks in the page, whether or not they are
	 * part of the affected region.  This is so we can discover if the
	 * page is fully mapped-to-disk.
	 */
	for (block_start = 0, block_in_page = 0, bh = head;
		  block_start < PAGE_CACHE_SIZE;
		  block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
		int create;

		block_end = block_start + blocksize;
		bh->b_state = 0;
		create = 1;
		if (block_start >= to)
			create = 0;
		ret = get_block(inode, block_in_file + block_in_page,
					bh, create);
		if (ret)
			goto failed;
		if (!buffer_mapped(bh))
			is_mapped_to_disk = 0;
		if (buffer_new(bh))
			unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
		if (PageUptodate(page)) {
			set_buffer_uptodate(bh);
			continue;
		}
		if (buffer_new(bh) || !buffer_mapped(bh)) {
			zero_user_segments(page, block_start, from,
							to, block_end);
			continue;
		}
		if (buffer_uptodate(bh))
			continue;	/* reiserfs does this */
		if (block_start < from || block_end > to) {
			lock_buffer(bh);
			bh->b_end_io = end_buffer_read_nobh;
			submit_bh(READ, bh);
			nr_reads++;
		}
	}

	if (nr_reads) {
		/*
		 * The page is locked, so these buffers are protected from
		 * any VM or truncate activity.  Hence we don't need to care
		 * for the buffer_head refcounts.
		 */
		for (bh = head; bh; bh = bh->b_this_page) {
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				ret = -EIO;
		}
		if (ret)
			goto failed;
	}

	if (is_mapped_to_disk)
		SetPageMappedToDisk(page);

	*fsdata = head; /* to be released by nobh_write_end */

	return 0;

failed:
	BUG_ON(!ret);
	/*
	 * Error recovery is a bit difficult. We need to zero out blocks that
	 * were newly allocated, and dirty them to ensure they get written out.
	 * Buffers need to be attached to the page at this point, otherwise
	 * the handling of potential IO errors during writeout would be hard
	 * (could try doing synchronous writeout, but what if that fails too?)
	 */
	attach_nobh_buffers(page, head);
	page_zero_new_buffers(page, from, to);
	*fsdata = NULL;

out_release:
	unlock_page(page);
	page_cache_release(page);
	*pagep = NULL;

	if (pos + len > inode->i_size)
		vmtruncate(inode, inode->i_size);

	return ret;
}
EXPORT_SYMBOL(nobh_write_begin);
int nobh_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;
	struct buffer_head *head = fsdata;
	struct buffer_head *bh;
	BUG_ON(fsdata != NULL && page_has_buffers(page));

	if (unlikely(copied < len) && !page_has_buffers(page))
		attach_nobh_buffers(page, head);
	if (page_has_buffers(page))
		return generic_write_end(file, mapping, pos, len,
					copied, page, fsdata);

	SetPageUptodate(page);
	set_page_dirty(page);
	if (pos+copied > inode->i_size) {
		i_size_write(inode, pos+copied);
		mark_inode_dirty(inode);
	}

	unlock_page(page);
	page_cache_release(page);

	while (head) {
		bh = head;
		head = head->b_this_page;
		free_buffer_head(bh);
	}

	return copied;
}
EXPORT_SYMBOL(nobh_write_end);
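/*
 * Example (sketch): the nobh entry points are wired up as a set in a
 * filesystem's address_space_operations.  nobh_write_end() can be used
 * directly, while nobh_write_begin() needs a wrapper to supply the
 * get_block callback; "myfs" names are hypothetical:
 *
 *	static int myfs_nobh_write_begin(struct file *file,
 *			struct address_space *mapping, loff_t pos,
 *			unsigned len, unsigned flags,
 *			struct page **pagep, void **fsdata)
 *	{
 *		return nobh_write_begin(file, mapping, pos, len, flags,
 *					pagep, fsdata, myfs_get_block);
 *	}
 *
 *	.write_begin	= myfs_nobh_write_begin,
 *	.write_end	= nobh_write_end,
 */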
/*
 * nobh_writepage() - based on block_write_full_page() except
 * that it tries to operate without attaching bufferheads to
 * the page.
 */
int nobh_writepage(struct page *page, get_block_t *get_block,
			struct writeback_control *wbc)
{
	struct inode * const inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	unsigned offset;
	int ret;

	/* Is the page fully inside i_size? */
	if (page->index < end_index)
		goto out;

	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_CACHE_SIZE-1);
	if (page->index >= end_index+1 || !offset) {
		/*
		 * The page may have dirty, unmapped buffers.  For example,
		 * they may have been added in ext3_writepage().  Make them
		 * freeable here, so the page does not leak.
		 */
#if 0
		/* Not really sure about this  - do we need this ? */
		if (page->mapping->a_ops->invalidatepage)
			page->mapping->a_ops->invalidatepage(page, offset);
#endif
		unlock_page(page);
		return 0; /* don't care */
	}

	/*
	 * The page straddles i_size.  It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
	 * in multiples of the page size.  For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
out:
	ret = mpage_writepage(page, get_block, wbc);
	if (ret == -EAGAIN)
		ret = __block_write_full_page(inode, page, get_block, wbc);
	return ret;
}
EXPORT_SYMBOL(nobh_writepage);
int nobh_truncate_page(struct address_space *mapping,
			loff_t from, get_block_t *get_block)
{
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	unsigned blocksize;
	sector_t iblock;
	unsigned length, pos;
	struct inode *inode = mapping->host;
	struct page *page;
	struct buffer_head map_bh;
	int err;

	blocksize = 1 << inode->i_blkbits;
	length = offset & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;

	length = blocksize - length;
	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);

	page = grab_cache_page(mapping, index);
	err = -ENOMEM;
	if (!page)
		goto out;

	if (page_has_buffers(page)) {
has_buffers:
		unlock_page(page);
		page_cache_release(page);
		return block_truncate_page(mapping, from, get_block);
	}

	/* Find the buffer that contains "offset" */
	pos = blocksize;
	while (offset >= pos) {
		iblock++;
		pos += blocksize;
	}

	map_bh.b_size = blocksize;
	map_bh.b_state = 0;
	err = get_block(inode, iblock, &map_bh, 0);
	if (err)
		goto unlock;
	/* unmapped? It's a hole - nothing to do */
	if (!buffer_mapped(&map_bh))
		goto unlock;

	/* Ok, it's mapped. Make sure it's up-to-date */
	if (!PageUptodate(page)) {
		err = mapping->a_ops->readpage(NULL, page);
		if (err) {
			page_cache_release(page);
			goto out;
		}
		lock_page(page);
		if (!PageUptodate(page)) {
			err = -EIO;
			goto unlock;
		}
		if (page_has_buffers(page))
			goto has_buffers;
	}
	zero_user(page, offset, length);
	set_page_dirty(page);
	err = 0;

unlock:
	unlock_page(page);
	page_cache_release(page);
out:
	return err;
}
EXPORT_SYMBOL(nobh_truncate_page);
int block_truncate_page(struct address_space *mapping,
			loff_t from, get_block_t *get_block)
{
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	unsigned blocksize;
	sector_t iblock;
	unsigned length, pos;
	struct inode *inode = mapping->host;
	struct page *page;
	struct buffer_head *bh;
	int err;

	blocksize = 1 << inode->i_blkbits;
	length = offset & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;

	length = blocksize - length;
	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);

	page = grab_cache_page(mapping, index);
	err = -ENOMEM;
	if (!page)
		goto out;

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	/* Find the buffer that contains "offset" */
	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	err = 0;
	if (!buffer_mapped(bh)) {
		WARN_ON(bh->b_size != blocksize);
		err = get_block(inode, iblock, bh, 0);
		if (err)
			goto unlock;
		/* unmapped? It's a hole - nothing to do */
		if (!buffer_mapped(bh))
			goto unlock;
	}

	/* Ok, it's mapped. Make sure it's up-to-date */
	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
		err = -EIO;
		ll_rw_block(READ, 1, &bh);
		wait_on_buffer(bh);
		/* Uhhuh. Read error. Complain and punt. */
		if (!buffer_uptodate(bh))
			goto unlock;
	}

	zero_user(page, offset, length);
	mark_buffer_dirty(bh);
	err = 0;

unlock:
	unlock_page(page);
	page_cache_release(page);
out:
	return err;
}
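/*
 * Example (sketch): block_truncate_page() is typically called from a
 * filesystem's truncate path to zero the tail of the last remaining
 * block before the block mappings themselves are trimmed; hedged
 * sketch with hypothetical "myfs" names:
 *
 *	static void myfs_truncate(struct inode *inode)
 *	{
 *		block_truncate_page(inode->i_mapping, inode->i_size,
 *				    myfs_get_block);
 *		... then release blocks beyond the new EOF ...
 *	}
 */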
/*
 * The generic ->writepage function for buffer-backed address_spaces
 */
int block_write_full_page(struct page *page, get_block_t *get_block,
			struct writeback_control *wbc)
{
	struct inode * const inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	unsigned offset;

	/* Is the page fully inside i_size? */
	if (page->index < end_index)
		return __block_write_full_page(inode, page, get_block, wbc);

	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_CACHE_SIZE-1);
	if (page->index >= end_index+1 || !offset) {
		/*
		 * The page may have dirty, unmapped buffers.  For example,
		 * they may have been added in ext3_writepage().  Make them
		 * freeable here, so the page does not leak.
		 */
		do_invalidatepage(page, 0);
		unlock_page(page);
		return 0; /* don't care */
	}

	/*
	 * The page straddles i_size.  It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
	 * in multiples of the page size.  For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
	return __block_write_full_page(inode, page, get_block, wbc);
}
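/*
 * Example (sketch): the usual ->writepage wrapper, with hypothetical
 * "myfs" names:
 *
 *	static int myfs_writepage(struct page *page,
 *				  struct writeback_control *wbc)
 *	{
 *		return block_write_full_page(page, myfs_get_block, wbc);
 *	}
 */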
sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
			    get_block_t *get_block)
{
	struct buffer_head tmp;
	struct inode *inode = mapping->host;
	tmp.b_state = 0;
	tmp.b_blocknr = 0;
	tmp.b_size = 1 << inode->i_blkbits;
	get_block(inode, block, &tmp, 0);
	return tmp.b_blocknr;
}
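/*
 * Example (sketch): ->bmap is normally just a wrapper around this
 * helper; "myfs" names are hypothetical:
 *
 *	static sector_t myfs_bmap(struct address_space *mapping,
 *				  sector_t block)
 *	{
 *		return generic_block_bmap(mapping, block, myfs_get_block);
 *	}
 */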
static void end_bio_bh_io_sync(struct bio *bio, int err)
{
	struct buffer_head *bh = bio->bi_private;

	if (err == -EOPNOTSUPP) {
		set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
		set_bit(BH_Eopnotsupp, &bh->b_state);
	}

	if (unlikely(test_bit(BIO_QUIET, &bio->bi_flags)))
		set_bit(BH_Quiet, &bh->b_state);

	bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
	bio_put(bio);
}
int submit_bh(int rw, struct buffer_head *bh)
{
	struct bio *bio;
	int ret = 0;

	BUG_ON(!buffer_locked(bh));
	BUG_ON(!buffer_mapped(bh));
	BUG_ON(!bh->b_end_io);

	/*
	 * Mask in barrier bit for a write (could be either a WRITE or a
	 * WRITE_SYNC
	 */
	if (buffer_ordered(bh) && (rw & WRITE))
		rw |= WRITE_BARRIER;

	/*
	 * Only clear out a write error when rewriting
	 */
	if (test_set_buffer_req(bh) && (rw & WRITE))
		clear_buffer_write_io_error(bh);

	/*
	 * from here on down, it's all bio -- do the initial mapping,
	 * submit_bio -> generic_make_request may further map this bio around
	 */
	bio = bio_alloc(GFP_NOIO, 1);

	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	bio->bi_io_vec[0].bv_page = bh->b_page;
	bio->bi_io_vec[0].bv_len = bh->b_size;
	bio->bi_io_vec[0].bv_offset = bh_offset(bh);

	bio->bi_vcnt = 1;
	bio->bi_idx = 0;
	bio->bi_size = bh->b_size;

	bio->bi_end_io = end_bio_bh_io_sync;
	bio->bi_private = bh;

	bio_get(bio);
	submit_bio(rw, bio);

	if (bio_flagged(bio, BIO_EOPNOTSUPP))
		ret = -EOPNOTSUPP;

	bio_put(bio);
	return ret;
}
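/*
 * Example (sketch): a caller wanting a synchronous read of a mapped
 * buffer pairs submit_bh() with the sync completion handler and
 * wait_on_buffer().  end_buffer_read_sync() drops the reference taken
 * by get_bh():
 *
 *	lock_buffer(bh);
 *	bh->b_end_io = end_buffer_read_sync;
 *	get_bh(bh);
 *	submit_bh(READ, bh);
 *	wait_on_buffer(bh);
 *	if (!buffer_uptodate(bh))
 *		return -EIO;
 */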
/**
 * ll_rw_block: low-level access to block devices (DEPRECATED)
 * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
 * @nr: number of &struct buffer_heads in the array
 * @bhs: array of pointers to &struct buffer_head
 *
 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
 * requests an I/O operation on them, either a %READ or a %WRITE.  The third
 * %SWRITE is like %WRITE only we make sure that the *current* data in buffers
 * are sent to disk. The fourth %READA option is described in the documentation
 * for generic_make_request() which ll_rw_block() calls.
 *
 * This function drops any buffer that it cannot get a lock on (with the
 * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
 * clean when doing a write request, and any buffer that appears to be
 * up-to-date when doing a read request.  Further it marks as clean buffers
 * that are processed for writing (the buffer cache won't assume that they
 * are actually clean until the buffer gets unlocked).
 *
 * ll_rw_block sets b_end_io to a simple completion handler that marks
 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
 * any waiters.
 *
 * All of the buffers must be for the same device, and must also be a
 * multiple of the current approved size for the device.
 */
void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
{
	int i;

	for (i = 0; i < nr; i++) {
		struct buffer_head *bh = bhs[i];

		if (rw == SWRITE || rw == SWRITE_SYNC)
			lock_buffer(bh);
		else if (!trylock_buffer(bh))
			continue;

		if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC) {
			if (test_clear_buffer_dirty(bh)) {
				bh->b_end_io = end_buffer_write_sync;
				get_bh(bh);
				if (rw == SWRITE_SYNC)
					submit_bh(WRITE_SYNC, bh);
				else
					submit_bh(WRITE, bh);
				continue;
			}
		} else {
			if (!buffer_uptodate(bh)) {
				bh->b_end_io = end_buffer_read_sync;
				get_bh(bh);
				submit_bh(rw, bh);
				continue;
			}
		}
		unlock_buffer(bh);
	}
}
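/*
 * Example (sketch): the classic metadata read pattern - fire off several
 * reads with ll_rw_block(), then wait on and check each buffer:
 *
 *	struct buffer_head *bhs[2] = { bh1, bh2 };
 *
 *	ll_rw_block(READ, 2, bhs);
 *	wait_on_buffer(bh1);
 *	wait_on_buffer(bh2);
 *	if (!buffer_uptodate(bh1) || !buffer_uptodate(bh2))
 *		return -EIO;
 */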
/*
 * For a data-integrity writeout, we need to wait upon any in-progress I/O
 * and then start new I/O and then wait upon it.  The caller must have a ref on
 * the buffer_head.
 */
int sync_dirty_buffer(struct buffer_head *bh)
{
	int ret = 0;

	WARN_ON(atomic_read(&bh->b_count) < 1);
	lock_buffer(bh);
	if (test_clear_buffer_dirty(bh)) {
		get_bh(bh);
		bh->b_end_io = end_buffer_write_sync;
		ret = submit_bh(WRITE_SYNC, bh);
		wait_on_buffer(bh);
		if (buffer_eopnotsupp(bh)) {
			clear_buffer_eopnotsupp(bh);
			ret = -EOPNOTSUPP;
		}
		if (!ret && !buffer_uptodate(bh))
			ret = -EIO;
	} else {
		unlock_buffer(bh);
	}
	return ret;
}
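/*
 * Example (sketch): modifying a metadata block and forcing it to disk.
 * sb_bread() is the usual source of the buffer_head here; error handling
 * is abbreviated:
 *
 *	bh = sb_bread(sb, blocknr);
 *	if (!bh)
 *		return -EIO;
 *	... modify bh->b_data ...
 *	mark_buffer_dirty(bh);
 *	err = sync_dirty_buffer(bh);
 *	brelse(bh);
 */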
/*
 * try_to_free_buffers() checks if all the buffers on this particular page
 * are unused, and releases them if so.
 *
 * Exclusion against try_to_free_buffers may be obtained by either
 * locking the page or by holding its mapping's private_lock.
 *
 * If the page is dirty but all the buffers are clean then we need to
 * be sure to mark the page clean as well.  This is because the page
 * may be against a block device, and a later reattachment of buffers
 * to a dirty page will set *all* buffers dirty.  Which would corrupt
 * filesystem data on the same device.
 *
 * The same applies to regular filesystem pages: if all the buffers are
 * clean then we set the page clean and proceed.  To do that, we require
 * total exclusion from __set_page_dirty_buffers().  That is obtained with
 * private_lock.
 *
 * try_to_free_buffers() is non-blocking.
 */
static inline int buffer_busy(struct buffer_head *bh)
{
	return atomic_read(&bh->b_count) |
		(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
}

static int
drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
{
	struct buffer_head *head = page_buffers(page);
	struct buffer_head *bh;

	bh = head;
	do {
		if (buffer_write_io_error(bh) && page->mapping)
			set_bit(AS_EIO, &page->mapping->flags);
		if (buffer_busy(bh))
			goto failed;
		bh = bh->b_this_page;
	} while (bh != head);

	do {
		struct buffer_head *next = bh->b_this_page;

		if (bh->b_assoc_map)
			__remove_assoc_queue(bh);
		bh = next;
	} while (bh != head);
	*buffers_to_free = head;
	__clear_page_buffers(page);
	return 1;
failed:
	return 0;
}
int try_to_free_buffers(struct page *page)
{
	struct address_space * const mapping = page->mapping;
	struct buffer_head *buffers_to_free = NULL;
	int ret = 0;

	BUG_ON(!PageLocked(page));
	if (PageWriteback(page))
		return 0;

	if (mapping == NULL) {		/* can this still happen? */
		ret = drop_buffers(page, &buffers_to_free);
		goto out;
	}

	spin_lock(&mapping->private_lock);
	ret = drop_buffers(page, &buffers_to_free);

	/*
	 * If the filesystem writes its buffers by hand (eg ext3)
	 * then we can have clean buffers against a dirty page.  We
	 * clean the page here; otherwise the VM will never notice
	 * that the filesystem did any IO at all.
	 *
	 * Also, during truncate, discard_buffer will have marked all
	 * the page's buffers clean.  We discover that here and clean
	 * the page also.
	 *
	 * private_lock must be held over this entire operation in order
	 * to synchronise against __set_page_dirty_buffers and prevent the
	 * dirty bit from being lost.
	 */
	if (ret)
		cancel_dirty_page(page, PAGE_CACHE_SIZE);
	spin_unlock(&mapping->private_lock);
out:
	if (buffers_to_free) {
		struct buffer_head *bh = buffers_to_free;

		do {
			struct buffer_head *next = bh->b_this_page;
			free_buffer_head(bh);
			bh = next;
		} while (bh != buffers_to_free);
	}
	return ret;
}
EXPORT_SYMBOL(try_to_free_buffers);
void block_sync_page(struct page *page)
{
	struct address_space *mapping;

	smp_mb();
	mapping = page_mapping(page);
	if (mapping)
		blk_run_backing_dev(mapping->backing_dev_info, page);
}
/*
 * There are no bdflush tunables left.  But distributions are
 * still running obsolete flush daemons, so we terminate them here.
 *
 * Use of bdflush() is deprecated and will be removed in a future kernel.
 * The `pdflush' kernel threads fully replace bdflush daemons and this call.
 */
SYSCALL_DEFINE2(bdflush, int, func, long, data)
{
	static int msg_count;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (msg_count < 5) {
		msg_count++;
		printk(KERN_INFO
			"warning: process `%s' used the obsolete bdflush"
			" system call\n", current->comm);
		printk(KERN_INFO "Fix your initscripts?\n");
	}

	if (func == 1)
		do_exit(0);
	return 0;
}
/*
 * Buffer-head allocation
 */
static struct kmem_cache *bh_cachep;

/*
 * Once the number of bh's in the machine exceeds this level, we start
 * stripping them in writeback.
 */
static int max_buffer_heads;

int buffer_heads_over_limit;

struct bh_accounting {
	int nr;			/* Number of live bh's */
	int ratelimit;		/* Limit cacheline bouncing */
};

static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};

static void recalc_bh_state(void)
{
	int i;
	int tot = 0;

	if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
		return;
	__get_cpu_var(bh_accounting).ratelimit = 0;
	for_each_online_cpu(i)
		tot += per_cpu(bh_accounting, i).nr;
	buffer_heads_over_limit = (tot > max_buffer_heads);
}

struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
{
	struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
	if (ret) {
		INIT_LIST_HEAD(&ret->b_assoc_buffers);
		get_cpu_var(bh_accounting).nr++;
		recalc_bh_state();
		put_cpu_var(bh_accounting);
	}
	return ret;
}
EXPORT_SYMBOL(alloc_buffer_head);

void free_buffer_head(struct buffer_head *bh)
{
	BUG_ON(!list_empty(&bh->b_assoc_buffers));
	kmem_cache_free(bh_cachep, bh);
	get_cpu_var(bh_accounting).nr--;
	recalc_bh_state();
	put_cpu_var(bh_accounting);
}
EXPORT_SYMBOL(free_buffer_head);

static void buffer_exit_cpu(int cpu)
{
	int i;
	struct bh_lru *b = &per_cpu(bh_lrus, cpu);

	for (i = 0; i < BH_LRU_SIZE; i++) {
		brelse(b->bhs[i]);
		b->bhs[i] = NULL;
	}
	get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
	per_cpu(bh_accounting, cpu).nr = 0;
	put_cpu_var(bh_accounting);
}

static int buffer_cpu_notify(struct notifier_block *self,
			      unsigned long action, void *hcpu)
{
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
		buffer_exit_cpu((unsigned long)hcpu);
	return NOTIFY_OK;
}
/**
 * bh_uptodate_or_lock - Test whether the buffer is uptodate
 * @bh: struct buffer_head
 *
 * Return true if the buffer is up-to-date and false,
 * with the buffer locked, if not.
 */
int bh_uptodate_or_lock(struct buffer_head *bh)
{
	if (!buffer_uptodate(bh)) {
		lock_buffer(bh);
		if (!buffer_uptodate(bh))
			return 0;
		unlock_buffer(bh);
	}
	return 1;
}
EXPORT_SYMBOL(bh_uptodate_or_lock);

/**
 * bh_submit_read - Submit a locked buffer for reading
 * @bh: struct buffer_head
 *
 * Returns zero on success and -EIO on error.
 */
int bh_submit_read(struct buffer_head *bh)
{
	BUG_ON(!buffer_locked(bh));

	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return 0;
	}

	get_bh(bh);
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(READ, bh);
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return 0;
	return -EIO;
}
EXPORT_SYMBOL(bh_submit_read);
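/*
 * Example (sketch): the intended pairing of the two helpers above - read
 * a block only when it is not already cached up-to-date:
 *
 *	bh = sb_getblk(sb, blocknr);
 *	if (bh && !bh_uptodate_or_lock(bh)) {
 *		if (bh_submit_read(bh)) {
 *			brelse(bh);
 *			return -EIO;
 *		}
 *	}
 */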
static void
init_buffer_head(void *data)
{
	struct buffer_head *bh = data;

	memset(bh, 0, sizeof(*bh));
	INIT_LIST_HEAD(&bh->b_assoc_buffers);
}

void __init buffer_init(void)
{
	int nrpages;

	bh_cachep = kmem_cache_create("buffer_head",
			sizeof(struct buffer_head), 0,
				(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
				SLAB_MEM_SPREAD),
				init_buffer_head);

	/*
	 * Limit the bh occupancy to 10% of ZONE_NORMAL
	 */
	nrpages = (nr_free_buffer_pages() * 10) / 100;
	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
	hotcpu_notifier(buffer_cpu_notify, 0);
}
EXPORT_SYMBOL(__bforget);
EXPORT_SYMBOL(__brelse);
EXPORT_SYMBOL(__wait_on_buffer);
EXPORT_SYMBOL(block_commit_write);
EXPORT_SYMBOL(block_prepare_write);
EXPORT_SYMBOL(block_page_mkwrite);
EXPORT_SYMBOL(block_read_full_page);
EXPORT_SYMBOL(block_sync_page);
EXPORT_SYMBOL(block_truncate_page);
EXPORT_SYMBOL(block_write_full_page);
EXPORT_SYMBOL(cont_write_begin);
EXPORT_SYMBOL(end_buffer_read_sync);
EXPORT_SYMBOL(end_buffer_write_sync);
EXPORT_SYMBOL(file_fsync);
EXPORT_SYMBOL(fsync_bdev);
EXPORT_SYMBOL(generic_block_bmap);
EXPORT_SYMBOL(generic_cont_expand_simple);
EXPORT_SYMBOL(init_buffer);
EXPORT_SYMBOL(invalidate_bdev);
EXPORT_SYMBOL(ll_rw_block);
EXPORT_SYMBOL(mark_buffer_dirty);
EXPORT_SYMBOL(submit_bh);
EXPORT_SYMBOL(sync_dirty_buffer);
EXPORT_SYMBOL(unlock_buffer);