/*
 *  linux/fs/buffer.c
 *
 *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
 */

/*
 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 *
 * Removed a lot of unnecessary code and simplified things now that
 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 *
 * Speed up hash, lru, and free list operations.  Use gfp() for allocating
 * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
 *
 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
 *
 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
 */
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)

inline void
init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
{
	bh->b_end_io = handler;
	bh->b_private = private;
}

static int sync_buffer(void *word)
{
	struct block_device *bd;
	struct buffer_head *bh
		= container_of(word, struct buffer_head, b_state);

	smp_mb();
	bd = bh->b_bdev;
	if (bd)
		blk_run_address_space(bd->bd_inode->i_mapping);
	io_schedule();
	return 0;
}

void fastcall __lock_buffer(struct buffer_head *bh)
{
	wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
							TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);

void fastcall unlock_buffer(struct buffer_head *bh)
{
	smp_mb__before_clear_bit();
	clear_buffer_locked(bh);
	smp_mb__after_clear_bit();
	wake_up_bit(&bh->b_state, BH_Lock);
}

/*
 * Block until a buffer comes unlocked.  This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
 */
void __wait_on_buffer(struct buffer_head * bh)
{
	wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
}
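/*
 * Illustrative sketch (not part of this file): a caller that needs a
 * stable view of a buffer takes the lock itself rather than merely
 * waiting:
 *
 *	lock_buffer(bh);
 *	... examine or modify bh contents ...
 *	unlock_buffer(bh);
 *
 * wait_on_buffer() alone is only a barrier; the buffer may be re-locked
 * the instant it returns.
 */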
static void
__clear_page_buffers(struct page *page)
{
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page_cache_release(page);
}

static void buffer_io_error(struct buffer_head *bh)
{
	char b[BDEVNAME_SIZE];

	printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
			bdevname(bh->b_bdev, b),
			(unsigned long long)bh->b_blocknr);
}

/*
 * End-of-IO handler helper function which does not touch the bh after
 * unlocking it.
 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
 * a race there is benign: unlock_buffer() only uses the bh's address for
 * hashing after unlocking the buffer, so it doesn't actually touch the bh
 * itself.
 */
static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		/* This happens, due to failed READA attempts. */
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
}

/*
 * Default synchronous end-of-IO handler..  Just mark it up-to-date and
 * unlock the buffer.  This is what ll_rw_block uses too.
 */
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
	__end_buffer_read_notouch(bh, uptodate);
	put_bh(bh);
}

void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
	char b[BDEVNAME_SIZE];

	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
			buffer_io_error(bh);
			printk(KERN_WARNING "lost page write due to "
					"I/O error on %s\n",
				       bdevname(bh->b_bdev, b));
		}
		set_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}

/*
 * Write out and wait upon all the dirty data associated with a block
 * device via its mapping.  Does not take the superblock lock.
 */
int sync_blockdev(struct block_device *bdev)
{
	int ret = 0;

	if (bdev)
		ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
	return ret;
}
EXPORT_SYMBOL(sync_blockdev);
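/*
 * Illustrative sketch (not part of this file): a driver about to release
 * a device would typically flush the blockdev pagecache first:
 *
 *	sync_blockdev(bdev);
 *	invalidate_bdev(bdev);
 */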
/*
 * Write out and wait upon all dirty data associated with this
 * device.   Filesystem data as well as the underlying block
 * device.  Takes the superblock lock.
 */
int fsync_bdev(struct block_device *bdev)
{
	struct super_block *sb = get_super(bdev);
	if (sb) {
		int res = fsync_super(sb);
		drop_super(sb);
		return res;
	}
	return sync_blockdev(bdev);
}

/**
 * freeze_bdev  --  lock a filesystem and force it into a consistent state
 * @bdev:	blockdevice to lock
 *
 * This takes the block device bd_mount_sem to make sure no new mounts
 * happen on bdev until thaw_bdev() is called.
 * If a superblock is found on this device, we take the s_umount semaphore
 * on it to make sure nobody unmounts until the snapshot creation is done.
 */
struct super_block *freeze_bdev(struct block_device *bdev)
{
	struct super_block *sb;

	down(&bdev->bd_mount_sem);
	sb = get_super(bdev);
	if (sb && !(sb->s_flags & MS_RDONLY)) {
		sb->s_frozen = SB_FREEZE_WRITE;
		smp_wmb();

		__fsync_super(sb);

		sb->s_frozen = SB_FREEZE_TRANS;
		smp_wmb();

		sync_blockdev(sb->s_bdev);

		if (sb->s_op->write_super_lockfs)
			sb->s_op->write_super_lockfs(sb);
	}

	sync_blockdev(bdev);
	return sb;	/* thaw_bdev releases s->s_umount and bd_mount_sem */
}
EXPORT_SYMBOL(freeze_bdev);

/**
 * thaw_bdev  -- unlock filesystem
 * @bdev:	blockdevice to unlock
 * @sb:		associated superblock
 *
 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
 */
void thaw_bdev(struct block_device *bdev, struct super_block *sb)
{
	if (sb) {
		BUG_ON(sb->s_bdev != bdev);

		if (sb->s_op->unlockfs)
			sb->s_op->unlockfs(sb);
		sb->s_frozen = SB_UNFROZEN;
		smp_wmb();
		wake_up(&sb->s_wait_unfrozen);
		drop_super(sb);
	}

	up(&bdev->bd_mount_sem);
}
EXPORT_SYMBOL(thaw_bdev);
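/*
 * Illustrative sketch (not part of this file): a volume-snapshot driver
 * would bracket snapshot creation with the freeze/thaw pair:
 *
 *	struct super_block *sb = freeze_bdev(bdev);
 *	create_snapshot(bdev);		// hypothetical driver helper
 *	thaw_bdev(bdev, sb);
 *
 * Writes are held off between the two calls, so the snapshot sees a
 * consistent filesystem image.
 */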
/*
 * Various filesystems appear to want __find_get_block to be non-blocking.
 * But it's the page lock which protects the buffers.  To get around this,
 * we get exclusion from try_to_free_buffers with the blockdev mapping's
 * private_lock.
 *
 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
 * may be quite high.  This code could TryLock the page, and if that
 * succeeds, there is no need to take private_lock. (But if
 * private_lock is contended then so is mapping->tree_lock).
 */
static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block)
{
	struct inode *bd_inode = bdev->bd_inode;
	struct address_space *bd_mapping = bd_inode->i_mapping;
	struct buffer_head *ret = NULL;
	pgoff_t index;
	struct buffer_head *bh;
	struct buffer_head *head;
	struct page *page;
	int all_mapped = 1;

	index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
	page = find_get_page(bd_mapping, index);
	if (!page)
		goto out;

	spin_lock(&bd_mapping->private_lock);
	if (!page_has_buffers(page))
		goto out_unlock;
	head = page_buffers(page);
	bh = head;
	do {
		if (bh->b_blocknr == block) {
			ret = bh;
			get_bh(bh);
			goto out_unlock;
		}
		if (!buffer_mapped(bh))
			all_mapped = 0;
		bh = bh->b_this_page;
	} while (bh != head);

	/* we might be here because some of the buffers on this page are
	 * not mapped.  This is due to various races between
	 * file io on the block device and getblk.  It gets dealt with
	 * elsewhere, don't buffer_error if we had some unmapped buffers
	 */
	if (all_mapped) {
		printk("__find_get_block_slow() failed. "
			"block=%llu, b_blocknr=%llu\n",
			(unsigned long long)block,
			(unsigned long long)bh->b_blocknr);
		printk("b_state=0x%08lx, b_size=%zu\n",
			bh->b_state, bh->b_size);
		printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
	}
out_unlock:
	spin_unlock(&bd_mapping->private_lock);
	page_cache_release(page);
out:
	return ret;
}
/* If invalidate_buffers() will trash dirty buffers, it means some kind
   of fs corruption is going on. Trashing dirty data always implies losing
   information that was supposed to be just stored on the physical layer
   by the user.

   Thus invalidate_buffers in general usage is not allowed to trash
   dirty buffers. For example, ioctl(BLKFLSBUF) expects dirty data to
   be preserved.  These buffers are simply skipped.

   We also skip buffers which are still in use.  For example this can
   happen if a userspace program is reading the block device.

   NOTE: in the case where the user removed a removable-media disk even if
   there's still dirty data not synced on disk (due to a bug in the device
   driver or to an error of the user), by not destroying the dirty buffers
   we could generate corruption also on the next media inserted; thus a
   parameter is necessary to handle this case in the safest way possible
   (trying not to corrupt the newly inserted disk with data belonging to
   the old, now corrupted, disk). Also for the ramdisk the natural thing
   to do in order to release the ramdisk memory is to destroy dirty buffers.

   These are two special cases. Normal usage implies the device driver
   issuing a sync on the device (without waiting for I/O completion) and
   then an invalidate_buffers call that doesn't trash dirty buffers.

   For handling cache coherency with the blkdev pagecache the 'update' case
   has been introduced. It is needed to re-read from disk any pinned
   buffer. NOTE: re-reading from disk is destructive so we can do it only
   when we assume nobody is changing the buffercache under our I/O and when
   we think the disk contains more recent information than the buffercache.
   The update == 1 pass marks the buffers we need to update, the update == 2
   pass does the actual I/O. */
void invalidate_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;

	if (mapping->nrpages == 0)
		return;

	invalidate_bh_lrus();
	invalidate_mapping_pages(mapping, 0, -1);
}
/*
 * Kick pdflush then try to free up some ZONE_NORMAL memory.
 */
static void free_more_memory(void)
{
	struct zone **zones;
	pg_data_t *pgdat;

	wakeup_pdflush(1024);
	yield();

	for_each_online_pgdat(pgdat) {
		zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
		if (*zones)
			try_to_free_pages(zones, 0, GFP_NOFS);
	}
}

/*
 * I/O completion handler for block_read_full_page() - pages
 * which come unlocked at the end of I/O.
 */
static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct page *page;
	int page_uptodate = 1;

	BUG_ON(!buffer_async_read(bh));

	page = bh->b_page;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		clear_buffer_uptodate(bh);
		if (printk_ratelimit())
			buffer_io_error(bh);
		SetPageError(page);
	}

	/*
	 * Be _very_ careful from here on. Bad things can happen if
	 * two buffer heads end IO at almost the same time and both
	 * decide that the page is now completely done.
	 */
	first = page_buffers(page);
	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
	clear_buffer_async_read(bh);
	unlock_buffer(bh);
	tmp = bh;
	do {
		if (!buffer_uptodate(tmp))
			page_uptodate = 0;
		if (buffer_async_read(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);

	/*
	 * If none of the buffers had errors and they are all
	 * uptodate then we can set the page uptodate.
	 */
	if (page_uptodate && !PageError(page))
		SetPageUptodate(page);
	unlock_page(page);
	return;

still_busy:
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	return;
}

/*
 * Completion handler for block_write_full_page() - pages which are unlocked
 * during I/O, and which have PageWriteback cleared upon I/O completion.
 */
static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
	char b[BDEVNAME_SIZE];
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct page *page;

	BUG_ON(!buffer_async_write(bh));

	page = bh->b_page;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		if (printk_ratelimit()) {
			buffer_io_error(bh);
			printk(KERN_WARNING "lost page write due to "
					"I/O error on %s\n",
			       bdevname(bh->b_bdev, b));
		}
		set_bit(AS_EIO, &page->mapping->flags);
		set_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
		SetPageError(page);
	}

	first = page_buffers(page);
	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);

	clear_buffer_async_write(bh);
	unlock_buffer(bh);
	tmp = bh->b_this_page;
	while (tmp != bh) {
		if (buffer_async_write(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	}
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	end_page_writeback(page);
	return;

still_busy:
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	return;
}

/*
 * If a page's buffers are under async readin (end_buffer_async_read
 * completion) then there is a possibility that another thread of
 * control could lock one of the buffers after it has completed
 * but while some of the other buffers have not completed.  This
 * locked buffer would confuse end_buffer_async_read() into not unlocking
 * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
 * that this buffer is not under async I/O.
 *
 * The page comes unlocked when it has no locked buffer_async buffers
 * left.
 *
 * PageLocked prevents anyone from starting new async I/O reads against
 * any of the buffers.
 *
 * PageWriteback is used to prevent simultaneous writeout of the same
 * page.
 *
 * PageLocked prevents anyone from starting writeback of a page which is
 * under read I/O (PageWriteback is only ever set against a locked page).
 */
static void mark_buffer_async_read(struct buffer_head *bh)
{
	bh->b_end_io = end_buffer_async_read;
	set_buffer_async_read(bh);
}

void mark_buffer_async_write(struct buffer_head *bh)
{
	bh->b_end_io = end_buffer_async_write;
	set_buffer_async_write(bh);
}
EXPORT_SYMBOL(mark_buffer_async_write);
/*
 * fs/buffer.c contains helper functions for buffer-backed address space's
 * fsync functions.  A common requirement for buffer-based filesystems is
 * that certain data from the backing blockdev needs to be written out for
 * a successful fsync().  For example, ext2 indirect blocks need to be
 * written back and waited upon before fsync() returns.
 *
 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
 * management of a list of dependent buffers at ->i_mapping->private_list.
 *
 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 * from their controlling inode's queue when they are being freed.  But
 * try_to_free_buffers() will be operating against the *blockdev* mapping
 * at the time, not against the S_ISREG file which depends on those buffers.
 * So the locking for private_list is via the private_lock in the address_space
 * which backs the buffers.  Which is different from the address_space
 * against which the buffers are listed.  So for a particular address_space,
 * mapping->private_lock does *not* protect mapping->private_list!  In fact,
 * mapping->private_list will always be protected by the backing blockdev's
 * ->private_lock.
 *
 * Which introduces a requirement: all buffers on an address_space's
 * ->private_list must be from the same address_space: the blockdev's.
 *
 * address_spaces which do not place buffers at ->private_list via these
 * utility functions are free to use private_lock and private_list for
 * whatever they want.  The only requirement is that list_empty(private_list)
 * be true at clear_inode() time.
 *
 * FIXME: clear_inode should not call invalidate_inode_buffers().  The
 * filesystems should do that.  invalidate_inode_buffers() should just go
 * BUG_ON(!list_empty).
 *
 * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
 * take an address_space, not an inode.  And it should be called
 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
 * queued up.
 *
 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
 * list if it is already on a list.  Because if the buffer is on a list,
 * it *must* already be on the right one.  If not, the filesystem is being
 * silly.  This will save a ton of locking.  But first we have to ensure
 * that buffers are taken *off* the old inode's list when they are freed
 * (presumably in truncate).  That requires careful auditing of all
 * filesystems (do it inside bforget()).  It could also be done by bringing
 * b_inode back.
 */

/*
 * The buffer's backing address_space's private_lock must be held
 */
static inline void __remove_assoc_queue(struct buffer_head *bh)
{
	list_del_init(&bh->b_assoc_buffers);
	WARN_ON(!bh->b_assoc_map);
	if (buffer_write_io_error(bh))
		set_bit(AS_EIO, &bh->b_assoc_map->flags);
	bh->b_assoc_map = NULL;
}

int inode_has_buffers(struct inode *inode)
{
	return !list_empty(&inode->i_data.private_list);
}

/*
 * osync is designed to support O_SYNC io.  It waits synchronously for
 * all already-submitted IO to complete, but does not queue any new
 * writes to the disk.
 *
 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
 * you dirty the buffers, and then use osync_inode_buffers to wait for
 * completion.  Any other dirty buffers which are not yet queued for
 * write will not be flushed to disk by the osync.
 */
static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head *p;
	int err = 0;

	spin_lock(lock);
repeat:
	list_for_each_prev(p, list) {
		bh = BH_ENTRY(p);
		if (buffer_locked(bh)) {
			get_bh(bh);
			spin_unlock(lock);
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				err = -EIO;
			brelse(bh);
			spin_lock(lock);
			goto repeat;
		}
	}
	spin_unlock(lock);
	return err;
}

/**
 * sync_mapping_buffers - write out and wait upon a mapping's "associated"
 *                        buffers
 * @mapping: the mapping which wants those buffers written
 *
 * Starts I/O against the buffers at mapping->private_list, and waits upon
 * that I/O.
 *
 * Basically, this is a convenience function for fsync().
 * @mapping is a file or directory which needs those buffers to be written for
 * a successful fsync().
 */
int sync_mapping_buffers(struct address_space *mapping)
{
	struct address_space *buffer_mapping = mapping->assoc_mapping;

	if (buffer_mapping == NULL || list_empty(&mapping->private_list))
		return 0;

	return fsync_buffers_list(&buffer_mapping->private_lock,
					&mapping->private_list);
}
EXPORT_SYMBOL(sync_mapping_buffers);
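/*
 * Illustrative sketch (not part of this file): a simple filesystem's
 * ->fsync method built on this helper might look like
 *
 *	int myfs_fsync(struct file *file, struct dentry *dentry, int datasync)
 *	{
 *		struct inode *inode = dentry->d_inode;
 *		int err = sync_mapping_buffers(inode->i_mapping);
 *		...
 *		return err;
 *	}
 *
 * (myfs_fsync is a hypothetical name; ext2's ext2_sync_file follows this
 * pattern.)
 */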
/*
 * Called when we've recently written block `bblock', and it is known that
 * `bblock' was for a buffer_boundary() buffer.  This means that the block at
 * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
 * dirty, schedule it for IO.  So that indirects merge nicely with their data.
 */
void write_boundary_block(struct block_device *bdev,
			sector_t bblock, unsigned blocksize)
{
	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
	if (bh) {
		if (buffer_dirty(bh))
			ll_rw_block(WRITE, 1, &bh);
		put_bh(bh);
	}
}

void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct address_space *buffer_mapping = bh->b_page->mapping;

	mark_buffer_dirty(bh);
	if (!mapping->assoc_mapping) {
		mapping->assoc_mapping = buffer_mapping;
	} else {
		BUG_ON(mapping->assoc_mapping != buffer_mapping);
	}
	if (list_empty(&bh->b_assoc_buffers)) {
		spin_lock(&buffer_mapping->private_lock);
		list_move_tail(&bh->b_assoc_buffers,
				&mapping->private_list);
		bh->b_assoc_map = mapping;
		spin_unlock(&buffer_mapping->private_lock);
	}
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);
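/*
 * Illustrative sketch (not part of this file): when a filesystem updates
 * metadata that a later fsync() of the file must cover - e.g. an ext2-style
 * indirect block - it dirties the buffer against the *inode* so that
 * sync_mapping_buffers() will find it:
 *
 *	struct buffer_head *bh = sb_getblk(sb, indirect_blocknr);
 *	... modify bh->b_data ...
 *	mark_buffer_dirty_inode(bh, inode);
 *	brelse(bh);
 */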
/*
 * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
 * dirty.
 *
 * If warn is true, then emit a warning if the page is not uptodate and has
 * not been truncated.
 */
static int __set_page_dirty(struct page *page,
		struct address_space *mapping, int warn)
{
	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	if (TestSetPageDirty(page))
		return 0;

	write_lock_irq(&mapping->tree_lock);
	if (page->mapping) {	/* Race with truncate? */
		WARN_ON_ONCE(warn && !PageUptodate(page));

		if (mapping_cap_account_dirty(mapping)) {
			__inc_zone_page_state(page, NR_FILE_DIRTY);
			task_io_account_write(PAGE_CACHE_SIZE);
		}
		radix_tree_tag_set(&mapping->page_tree,
				page_index(page), PAGECACHE_TAG_DIRTY);
	}
	write_unlock_irq(&mapping->tree_lock);
	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	return 1;
}

/*
 * Add a page to the dirty page list.
 *
 * It is a sad fact of life that this function is called from several places
 * deeply under spinlocking.  It may not sleep.
 *
 * If the page has buffers, the uptodate buffers are set dirty, to preserve
 * dirty-state coherency between the page and the buffers.  If the page does
 * not have buffers then when they are later attached they will all be set
 * dirty.
 *
 * The buffers are dirtied before the page is dirtied.  There's a small race
 * window in which a writepage caller may see the page cleanness but not the
 * buffer dirtiness.  That's fine.  If this code were to set the page dirty
 * before the buffers, a concurrent writepage caller could clear the page dirty
 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
 * page on the dirty page list.
 *
 * We use private_lock to lock against try_to_free_buffers while using the
 * page's buffer list.  Also use this to protect against clean buffers being
 * added to the page after it was set dirty.
 *
 * FIXME: may need to call ->reservepage here as well.  That's rather up to the
 * address_space though.
 */
int __set_page_dirty_buffers(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	spin_lock(&mapping->private_lock);
	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		do {
			set_buffer_dirty(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	spin_unlock(&mapping->private_lock);

	return __set_page_dirty(page, mapping, 1);
}
EXPORT_SYMBOL(__set_page_dirty_buffers);
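/*
 * Illustrative note (not part of this file): buffer-backed filesystems
 * typically wire this up as their set_page_dirty address_space operation:
 *
 *	const struct address_space_operations myfs_aops = {	// hypothetical
 *		.set_page_dirty	= __set_page_dirty_buffers,
 *		...
 *	};
 */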
/*
 * Write out and wait upon a list of buffers.
 *
 * We have conflicting pressures: we want to make sure that all
 * initially dirty buffers get waited on, but that any subsequently
 * dirtied buffers don't.  After all, we don't want fsync to last
 * forever if somebody is actively writing to the file.
 *
 * Do this in two main stages: first we copy dirty buffers to a
 * temporary inode list, queueing the writes as we go.  Then we clean
 * up, waiting for those writes to complete.
 *
 * During this second stage, any subsequent updates to the file may end
 * up refiling the buffer on the original inode's dirty list again, so
 * there is a chance we will end up with a buffer queued for write but
 * not yet completed on that list.  So, as a final cleanup we go through
 * the osync code to catch these locked, dirty buffers without requeuing
 * any newly dirty buffers for write.
 */
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head tmp;
	int err = 0, err2;

	INIT_LIST_HEAD(&tmp);

	spin_lock(lock);
	while (!list_empty(list)) {
		bh = BH_ENTRY(list->next);
		__remove_assoc_queue(bh);
		if (buffer_dirty(bh) || buffer_locked(bh)) {
			list_add(&bh->b_assoc_buffers, &tmp);
			if (buffer_dirty(bh)) {
				get_bh(bh);
				spin_unlock(lock);
				/*
				 * Ensure any pending I/O completes so that
				 * ll_rw_block() actually writes the current
				 * contents - it is a noop if I/O is still in
				 * flight on potentially older contents.
				 */
				ll_rw_block(SWRITE, 1, &bh);
				brelse(bh);
				spin_lock(lock);
			}
		}
	}

	while (!list_empty(&tmp)) {
		bh = BH_ENTRY(tmp.prev);
		list_del_init(&bh->b_assoc_buffers);
		get_bh(bh);
		spin_unlock(lock);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			err = -EIO;
		brelse(bh);
		spin_lock(lock);
	}

	spin_unlock(lock);
	err2 = osync_buffers_list(lock, list);
	if (err)
		return err;
	else
		return err2;
}
/*
 * Invalidate any and all dirty buffers on a given inode.  We are
 * probably unmounting the fs, but that doesn't mean we have already
 * done a sync().  Just drop the buffers from the inode list.
 *
 * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
 * assumes that all the buffers are against the blockdev.  Not true
 * for reiserfs.
 */
void invalidate_inode_buffers(struct inode *inode)
{
	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->assoc_mapping;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list))
			__remove_assoc_queue(BH_ENTRY(list->next));
		spin_unlock(&buffer_mapping->private_lock);
	}
}

/*
 * Remove any clean buffers from the inode's buffer list.  This is called
 * when we're trying to free the inode itself.  Those buffers can pin it.
 *
 * Returns true if all buffers were removed.
 */
int remove_inode_buffers(struct inode *inode)
{
	int ret = 1;

	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->assoc_mapping;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list)) {
			struct buffer_head *bh = BH_ENTRY(list->next);
			if (buffer_dirty(bh)) {
				ret = 0;
				break;
			}
			__remove_assoc_queue(bh);
		}
		spin_unlock(&buffer_mapping->private_lock);
	}
	return ret;
}
/*
 * Create the appropriate buffers when given a page for data area and
 * the size of each buffer.. Use the bh->b_this_page linked list to
 * follow the buffers created.  Return NULL if unable to create more
 * buffers.
 *
 * The retry flag is used to differentiate async IO (paging, swapping)
 * which may not fail from ordinary buffer allocations.
 */
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
		int retry)
{
	struct buffer_head *bh, *head;
	long offset;

try_again:
	head = NULL;
	offset = PAGE_SIZE;
	while ((offset -= size) >= 0) {
		bh = alloc_buffer_head(GFP_NOFS);
		if (!bh)
			goto no_grow;

		bh->b_bdev = NULL;
		bh->b_this_page = head;
		bh->b_blocknr = -1;
		head = bh;

		bh->b_state = 0;
		atomic_set(&bh->b_count, 0);
		bh->b_private = NULL;
		bh->b_size = size;

		/* Link the buffer to its page */
		set_bh_page(bh, page, offset);

		init_buffer(bh, NULL, NULL);
	}
	return head;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
	if (head) {
		do {
			bh = head;
			head = head->b_this_page;
			free_buffer_head(bh);
		} while (head);
	}

	/*
	 * Return failure for non-async IO requests.  Async IO requests
	 * are not allowed to fail, so we have to wait until buffer heads
	 * become available.  But we don't want tasks sleeping with
	 * partially complete buffers, so all were released above.
	 */
	if (!retry)
		return NULL;

	/* We're _really_ low on memory. Now we just
	 * wait for old buffer heads to become free due to
	 * finishing IO.  Since this is an async request and
	 * the reserve list is empty, we're sure there are
	 * async buffer heads in use.
	 */
	free_more_memory();
	goto try_again;
}
EXPORT_SYMBOL_GPL(alloc_page_buffers);
static inline void
link_dev_buffers(struct page *page, struct buffer_head *head)
{
	struct buffer_head *bh, *tail;

	bh = head;
	do {
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;
	attach_page_buffers(page, head);
}

/*
 * Initialise the state of a blockdev page's buffers.
 */
static void
init_page_buffers(struct page *page, struct block_device *bdev,
			sector_t block, int size)
{
	struct buffer_head *head = page_buffers(page);
	struct buffer_head *bh = head;
	int uptodate = PageUptodate(page);

	do {
		if (!buffer_mapped(bh)) {
			init_buffer(bh, NULL, NULL);
			bh->b_bdev = bdev;
			bh->b_blocknr = block;
			if (uptodate)
				set_buffer_uptodate(bh);
			set_buffer_mapped(bh);
		}
		block++;
		bh = bh->b_this_page;
	} while (bh != head);
}
/*
 * Create the page-cache page that contains the requested block.
 *
 * This is used purely for blockdev mappings.
 */
static struct page *
grow_dev_page(struct block_device *bdev, sector_t block,
		pgoff_t index, int size)
{
	struct inode *inode = bdev->bd_inode;
	struct page *page;
	struct buffer_head *bh;

	page = find_or_create_page(inode->i_mapping, index,
		(mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
	if (!page)
		return NULL;

	BUG_ON(!PageLocked(page));

	if (page_has_buffers(page)) {
		bh = page_buffers(page);
		if (bh->b_size == size) {
			init_page_buffers(page, bdev, block, size);
			return page;
		}
		if (!try_to_free_buffers(page))
			goto failed;
	}

	/*
	 * Allocate some buffers for this page
	 */
	bh = alloc_page_buffers(page, size, 0);
	if (!bh)
		goto failed;

	/*
	 * Link the page to the buffers and initialise them.  Take the
	 * lock to be atomic wrt __find_get_block(), which does not
	 * run under the page lock.
	 */
	spin_lock(&inode->i_mapping->private_lock);
	link_dev_buffers(page, bh);
	init_page_buffers(page, bdev, block, size);
	spin_unlock(&inode->i_mapping->private_lock);
	return page;

failed:
	BUG();
	unlock_page(page);
	page_cache_release(page);
	return NULL;
}
/*
 * Create buffers for the specified block device block's page.  If
 * that page was dirty, the buffers are set dirty also.
 */
static int
grow_buffers(struct block_device *bdev, sector_t block, int size)
{
	struct page *page;
	pgoff_t index;
	int sizebits;

	sizebits = -1;
	do {
		sizebits++;
	} while ((size << sizebits) < PAGE_SIZE);

	index = block >> sizebits;

	/*
	 * Check for a block which wants to lie outside our maximum possible
	 * pagecache index.  (this comparison is done using sector_t types).
	 */
	if (unlikely(index != block >> sizebits)) {
		char b[BDEVNAME_SIZE];

		printk(KERN_ERR "%s: requested out-of-range block %llu for "
			"device %s\n",
			__FUNCTION__, (unsigned long long)block,
			bdevname(bdev, b));
		return -EIO;
	}
	block = index << sizebits;
	/* Create a page with the proper size buffers.. */
	page = grow_dev_page(bdev, block, index, size);
	if (!page)
		return 0;
	unlock_page(page);
	page_cache_release(page);
	return 1;
}
static struct buffer_head *
__getblk_slow(struct block_device *bdev, sector_t block, int size)
{
	/* Size must be multiple of hard sectorsize */
	if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
			(size < 512 || size > PAGE_SIZE))) {
		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
					size);
		printk(KERN_ERR "hardsect size: %d\n",
					bdev_hardsect_size(bdev));

		dump_stack();
		return NULL;
	}

	for (;;) {
		struct buffer_head * bh;
		int ret;

		bh = __find_get_block(bdev, block, size);
		if (bh)
			return bh;

		ret = grow_buffers(bdev, block, size);
		if (ret < 0)
			return NULL;
		if (ret == 0)
			free_more_memory();
	}
}
/*
 * The relationship between dirty buffers and dirty pages:
 *
 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
 * the page is tagged dirty in its radix tree.
 *
 * At all times, the dirtiness of the buffers represents the dirtiness of
 * subsections of the page.  If the page has buffers, the page dirty bit is
 * merely a hint about the true dirty state.
 *
 * When a page is set dirty in its entirety, all its buffers are marked dirty
 * (if the page has buffers).
 *
 * When a buffer is marked dirty, its page is dirtied, but the page's other
 * buffers are not.
 *
 * Also.  When blockdev buffers are explicitly read with bread(), they
 * individually become uptodate.  But their backing page remains not
 * uptodate - even if all of its buffers are uptodate.  A subsequent
 * block_read_full_page() against that page will discover all the uptodate
 * buffers, will set the page uptodate and will perform no I/O.
 */

/**
 * mark_buffer_dirty - mark a buffer_head as needing writeout
 * @bh: the buffer_head to mark dirty
 *
 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
 * backing page dirty, then tag the page as dirty in its address_space's radix
 * tree and then attach the address_space's inode to its superblock's dirty
 * inode list.
 *
 * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
 * mapping->tree_lock and the global inode_lock.
 */
void fastcall mark_buffer_dirty(struct buffer_head *bh)
{
	WARN_ON_ONCE(!buffer_uptodate(bh));
	if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
		__set_page_dirty(bh->b_page, page_mapping(bh->b_page), 0);
}
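/*
 * Illustrative sketch (not part of this file): the usual metadata-update
 * sequence in a buffer-based filesystem is read, modify, dirty:
 *
 *	struct buffer_head *bh = sb_bread(sb, blocknr);
 *	if (!bh)
 *		return -EIO;
 *	memcpy(bh->b_data + offset, data, len);
 *	mark_buffer_dirty(bh);
 *	brelse(bh);
 *
 * The write itself happens later, via pdflush or an explicit sync.
 */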
/*
 * Decrement a buffer_head's reference count.  If all buffers against a page
 * have zero reference count, are clean and unlocked, and if the page is clean
 * and unlocked then try_to_free_buffers() may strip the buffers from the page
 * in preparation for freeing it (sometimes, rarely, buffers are removed from
 * a page but it ends up not being freed, and buffers may later be reattached).
 */
void __brelse(struct buffer_head * buf)
{
	if (atomic_read(&buf->b_count)) {
		put_bh(buf);
		return;
	}
	printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n");
	WARN_ON(1);
}

/*
 * bforget() is like brelse(), except it discards any
 * potentially dirty data.
 */
void __bforget(struct buffer_head *bh)
{
	clear_buffer_dirty(bh);
	if (!list_empty(&bh->b_assoc_buffers)) {
		struct address_space *buffer_mapping = bh->b_page->mapping;

		spin_lock(&buffer_mapping->private_lock);
		list_del_init(&bh->b_assoc_buffers);
		bh->b_assoc_map = NULL;
		spin_unlock(&buffer_mapping->private_lock);
	}
	__brelse(bh);
}

static struct buffer_head *__bread_slow(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return bh;
	} else {
		get_bh(bh);
		bh->b_end_io = end_buffer_read_sync;
		submit_bh(READ, bh);
		wait_on_buffer(bh);
		if (buffer_uptodate(bh))
			return bh;
	}
	brelse(bh);
	return NULL;
}
/*
 * Per-cpu buffer LRU implementation.  To reduce the cost of __find_get_block().
 * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
 * refcount elevated by one when they're in an LRU.  A buffer can only appear
 * once in a particular CPU's LRU.  A single buffer can be present in multiple
 * CPU's LRUs at the same time.
 *
 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
 * sb_find_get_block().
 *
 * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
 * a local interrupt disable for that.
 */

#define BH_LRU_SIZE	8

struct bh_lru {
	struct buffer_head *bhs[BH_LRU_SIZE];
};

static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};

#ifdef CONFIG_SMP
#define bh_lru_lock()	local_irq_disable()
#define bh_lru_unlock()	local_irq_enable()
#else
#define bh_lru_lock()	preempt_disable()
#define bh_lru_unlock()	preempt_enable()
#endif

static inline void check_irqs_on(void)
{
#ifdef irqs_disabled
	BUG_ON(irqs_disabled());
#endif
}

/*
 * The LRU management algorithm is dopey-but-simple.  Sorry.
 */
static void bh_lru_install(struct buffer_head *bh)
{
	struct buffer_head *evictee = NULL;
	struct bh_lru *lru;

	check_irqs_on();
	bh_lru_lock();
	lru = &__get_cpu_var(bh_lrus);
	if (lru->bhs[0] != bh) {
		struct buffer_head *bhs[BH_LRU_SIZE];
		int in;
		int out = 0;

		get_bh(bh);
		bhs[out++] = bh;
		for (in = 0; in < BH_LRU_SIZE; in++) {
			struct buffer_head *bh2 = lru->bhs[in];

			if (bh2 == bh) {
				__brelse(bh2);
			} else {
				if (out >= BH_LRU_SIZE) {
					BUG_ON(evictee != NULL);
					evictee = bh2;
				} else {
					bhs[out++] = bh2;
				}
			}
		}
		while (out < BH_LRU_SIZE)
			bhs[out++] = NULL;
		memcpy(lru->bhs, bhs, sizeof(bhs));
	}
	bh_lru_unlock();

	if (evictee)
		__brelse(evictee);
}
/*
 * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
 */
static struct buffer_head *
lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *ret = NULL;
	struct bh_lru *lru;
	unsigned int i;

	check_irqs_on();
	bh_lru_lock();
	lru = &__get_cpu_var(bh_lrus);
	for (i = 0; i < BH_LRU_SIZE; i++) {
		struct buffer_head *bh = lru->bhs[i];

		if (bh && bh->b_bdev == bdev &&
				bh->b_blocknr == block && bh->b_size == size) {
			if (i) {
				while (i) {
					lru->bhs[i] = lru->bhs[i - 1];
					i--;
				}
				lru->bhs[0] = bh;
			}
			get_bh(bh);
			ret = bh;
			break;
		}
	}
	bh_lru_unlock();
	return ret;
}

/*
 * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
 * it in the LRU and mark it as accessed.  If it is not present then return
 * NULL
 */
struct buffer_head *
__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);

	if (bh == NULL) {
		bh = __find_get_block_slow(bdev, block);
		if (bh)
			bh_lru_install(bh);
	}
	if (bh)
		touch_buffer(bh);
	return bh;
}
EXPORT_SYMBOL(__find_get_block);
/*
 * __getblk will locate (and, if necessary, create) the buffer_head
 * which corresponds to the passed block_device, block and size. The
 * returned buffer has its reference count incremented.
 *
 * __getblk() cannot fail - it just keeps trying.  If you pass it an
 * illegal block number, __getblk() will happily return a buffer_head
 * which represents the non-existent block.  Very weird.
 *
 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
 * attempt is failing.  FIXME, perhaps?
 */
struct buffer_head *
__getblk(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = __find_get_block(bdev, block, size);

	might_sleep();
	if (bh == NULL)
		bh = __getblk_slow(bdev, block, size);
	return bh;
}
EXPORT_SYMBOL(__getblk);
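/*
 * Illustrative note (not part of this file): filesystems rarely call
 * __getblk() directly; the sb_getblk() wrapper in <linux/buffer_head.h>
 * fills in the superblock's device and block size:
 *
 *	struct buffer_head *bh = sb_getblk(sb, blocknr);
 *
 * Use it when the block is about to be overwritten in full, so reading
 * the old contents would be wasted I/O.
 */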
/*
 * Do async read-ahead on a buffer..
 */
void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = __getblk(bdev, block, size);
	if (likely(bh)) {
		ll_rw_block(READA, 1, &bh);
		brelse(bh);
	}
}
EXPORT_SYMBOL(__breadahead);

/**
 *  __bread() - reads a specified block and returns the bh
 *  @bdev: the block_device to read from
 *  @block: number of block
 *  @size: size (in bytes) to read
 *
 *  Reads a specified block, and returns buffer head that contains it.
 *  It returns NULL if the block was unreadable.
 */
struct buffer_head *
__bread(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = __getblk(bdev, block, size);

	if (likely(bh) && !buffer_uptodate(bh))
		bh = __bread_slow(bh);
	return bh;
}
EXPORT_SYMBOL(__bread);
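/*
 * Illustrative sketch (not part of this file): reading a metadata block
 * through the sb_bread() wrapper, which calls __bread() with the
 * superblock's device and block size:
 *
 *	struct buffer_head *bh = sb_bread(sb, blocknr);
 *	if (!bh)
 *		return -EIO;	// the block could not be read
 *	... use bh->b_data ...
 *	brelse(bh);
 */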
/*
 * invalidate_bh_lrus() is called rarely - but not only at unmount.
 * This doesn't race because it runs in each cpu either in irq
 * or with preempt disabled.
 */
static void invalidate_bh_lru(void *arg)
{
	struct bh_lru *b = &get_cpu_var(bh_lrus);
	int i;

	for (i = 0; i < BH_LRU_SIZE; i++) {
		brelse(b->bhs[i]);
		b->bhs[i] = NULL;
	}
	put_cpu_var(bh_lrus);
}

void invalidate_bh_lrus(void)
{
	on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
}

void set_bh_page(struct buffer_head *bh,
		struct page *page, unsigned long offset)
{
	bh->b_page = page;
	BUG_ON(offset >= PAGE_SIZE);
	if (PageHighMem(page))
		/*
		 * This catches illegal uses and preserves the offset:
		 */
		bh->b_data = (char *)(0 + offset);
	else
		bh->b_data = page_address(page) + offset;
}
EXPORT_SYMBOL(set_bh_page);
/*
 * Called when truncating a buffer on a page completely.
 */
static void discard_buffer(struct buffer_head * bh)
{
	lock_buffer(bh);
	clear_buffer_dirty(bh);
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
	unlock_buffer(bh);
}

/**
 * block_invalidatepage - invalidate part or all of a buffer-backed page
 *
 * @page: the page which is affected
 * @offset: the index of the truncation point
 *
 * block_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * block_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point.  Because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void block_invalidatepage(struct page *page, unsigned long offset)
{
	struct buffer_head *head, *bh, *next;
	unsigned int curr_off = 0;

	BUG_ON(!PageLocked(page));
	if (!page_has_buffers(page))
		goto out;

	head = page_buffers(page);
	bh = head;
	do {
		unsigned int next_off = curr_off + bh->b_size;
		next = bh->b_this_page;

		/*
		 * is this block fully invalidated?
		 */
		if (offset <= curr_off)
			discard_buffer(bh);
		curr_off = next_off;
		bh = next;
	} while (bh != head);

	/*
	 * We release buffers only if the entire page is being invalidated.
	 * The get_block cached value has been unconditionally invalidated,
	 * so real IO is not possible anymore.
	 */
	if (offset == 0)
		try_to_release_page(page, 0);
out:
	return;
}
EXPORT_SYMBOL(block_invalidatepage);
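/*
 * Illustrative note (not part of this file): a filesystem using these
 * helpers typically points its invalidatepage address_space operation
 * straight at this function:
 *
 *	.invalidatepage	= block_invalidatepage,
 *
 * so truncate can discard buffers beyond the new EOF before the blocks
 * are freed.
 */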
/*
 * We attach and possibly dirty the buffers atomically wrt
 * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
 * is already excluded via the page lock.
 */
void create_empty_buffers(struct page *page,
			unsigned long blocksize, unsigned long b_state)
{
	struct buffer_head *bh, *head, *tail;

	head = alloc_page_buffers(page, blocksize, 1);
	bh = head;
	do {
		bh->b_state |= b_state;
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;

	spin_lock(&page->mapping->private_lock);
	if (PageUptodate(page) || PageDirty(page)) {
		bh = head;
		do {
			if (PageDirty(page))
				set_buffer_dirty(bh);
			if (PageUptodate(page))
				set_buffer_uptodate(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	attach_page_buffers(page, head);
	spin_unlock(&page->mapping->private_lock);
}
EXPORT_SYMBOL(create_empty_buffers);
/*
 * We are taking a block for data and we don't want any output from any
 * buffer-cache aliases starting from return from that function and
 * until the moment when something will explicitly mark the buffer
 * dirty (hopefully that will not happen until we will free that block ;-)
 * We don't even need to mark it not-uptodate - nobody can expect
 * anything from a newly allocated buffer anyway. We used to use
 * unmap_buffer() for such invalidation, but that was wrong. We definitely
 * don't want to mark the alias unmapped, for example - it would confuse
 * anyone who might pick it with bread() afterwards...
 *
 * Also..  Note that bforget() doesn't lock the buffer.  So there can
 * be writeout I/O going on against recently-freed buffers.  We don't
 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
 * only if we really need to.  That happens here.
 */
void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
{
	struct buffer_head *old_bh;

	might_sleep();

	old_bh = __find_get_block_slow(bdev, block);
	if (old_bh) {
		clear_buffer_dirty(old_bh);
		wait_on_buffer(old_bh);
		clear_buffer_req(old_bh);
		__brelse(old_bh);
	}
}
EXPORT_SYMBOL(unmap_underlying_metadata);
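/*
 * Illustrative note (not part of this file): a caller that just allocated
 * a new block for file data invokes this with the new block number, as
 * __block_prepare_write() below does:
 *
 *	if (buffer_new(bh))
 *		unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
 *
 * killing any stale buffer_head that still aliases the same disk block.
 */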
/*
 * NOTE! All mapped/uptodate combinations are valid:
 *
 *	Mapped	Uptodate	Meaning
 *
 *	No	No		"unknown" - must do get_block()
 *	No	Yes		"hole" - zero-filled
 *	Yes	No		"allocated" - allocated on disk, not read in
 *	Yes	Yes		"valid" - allocated and up-to-date in memory.
 *
 * "Dirty" is valid only with the last case (mapped+uptodate).
 */

/*
 * While block_write_full_page is writing back the dirty buffers under
 * the page lock, whoever dirtied the buffers may decide to clean them
 * again at any time.  We handle that by only looking at the buffer
 * state inside lock_buffer().
 *
 * If block_write_full_page() is called for regular writeback
 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
 * locked buffer.   This only can happen if someone has written the buffer
 * directly, with submit_bh().  At the address_space level PageWriteback
 * prevents this contention from occurring.
 */
static int __block_write_full_page(struct inode *inode, struct page *page,
			get_block_t *get_block, struct writeback_control *wbc)
{
	int err;
	sector_t block;
	sector_t last_block;
	struct buffer_head *bh, *head;
	const unsigned blocksize = 1 << inode->i_blkbits;
	int nr_underway = 0;

	BUG_ON(!PageLocked(page));

	last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;

	if (!page_has_buffers(page)) {
		create_empty_buffers(page, blocksize,
					(1 << BH_Dirty)|(1 << BH_Uptodate));
	}

	/*
	 * Be very careful.  We have no exclusion from __set_page_dirty_buffers
	 * here, and the (potentially unmapped) buffers may become dirty at
	 * any time.  If a buffer becomes dirty here after we've inspected it
	 * then we just miss that fact, and the page stays dirty.
	 *
	 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
	 * handle that here by just cleaning them.
	 */

	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
	head = page_buffers(page);
	bh = head;

	/*
	 * Get all the dirty buffers mapped to disk addresses and
	 * handle any aliases from the underlying blockdev's mapping.
	 */
	do {
		if (block > last_block) {
			/*
			 * mapped buffers outside i_size will occur, because
			 * this page can be outside i_size when there is a
			 * truncate in progress.
			 */
			/*
			 * The buffer was zeroed by block_write_full_page()
			 */
			clear_buffer_dirty(bh);
			set_buffer_uptodate(bh);
		} else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
			WARN_ON(bh->b_size != blocksize);
			err = get_block(inode, block, bh, 1);
			if (err)
				goto recover;
			if (buffer_new(bh)) {
				/* blockdev mappings never come here */
				clear_buffer_new(bh);
				unmap_underlying_metadata(bh->b_bdev,
							bh->b_blocknr);
			}
		}
		bh = bh->b_this_page;
		block++;
	} while (bh != head);

	do {
		if (!buffer_mapped(bh))
			continue;
		/*
		 * If it's a fully non-blocking write attempt and we cannot
		 * lock the buffer then redirty the page.  Note that this can
		 * potentially cause a busy-wait loop from pdflush and kswapd
		 * activity, but those code paths have their own higher-level
		 * throttling.
		 */
		if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
			lock_buffer(bh);
		} else if (test_set_buffer_locked(bh)) {
			redirty_page_for_writepage(wbc, page);
			continue;
		}
		if (test_clear_buffer_dirty(bh)) {
			mark_buffer_async_write(bh);
		} else {
			unlock_buffer(bh);
		}
	} while ((bh = bh->b_this_page) != head);

	/*
	 * The page and its buffers are protected by PageWriteback(), so we can
	 * drop the bh refcounts early.
	 */
	BUG_ON(PageWriteback(page));
	set_page_writeback(page);

	do {
		struct buffer_head *next = bh->b_this_page;
		if (buffer_async_write(bh)) {
			submit_bh(WRITE, bh);
			nr_underway++;
		}
		bh = next;
	} while (bh != head);
	unlock_page(page);

	err = 0;
done:
	if (nr_underway == 0) {
		/*
		 * The page was marked dirty, but the buffers were
		 * clean.  Someone wrote them back by hand with
		 * ll_rw_block/submit_bh.  A rare case.
		 */
		end_page_writeback(page);

		/*
		 * The page and buffer_heads can be released at any time from
		 * here on.
		 */
		wbc->pages_skipped++;	/* We didn't write this page */
	}
	return err;

recover:
	/*
	 * ENOSPC, or some other error.  We may already have added some
	 * blocks to the file, so we need to write these out to avoid
	 * exposing stale data.
	 * The page is currently locked and not marked for writeback
	 */
	bh = head;
	/* Recovery: lock and submit the mapped buffers */
	do {
		if (buffer_mapped(bh) && buffer_dirty(bh)) {
			lock_buffer(bh);
			mark_buffer_async_write(bh);
		} else {
			/*
			 * The buffer may have been set dirty during
			 * attachment to a dirty page.
			 */
			clear_buffer_dirty(bh);
		}
	} while ((bh = bh->b_this_page) != head);
	SetPageError(page);
	BUG_ON(PageWriteback(page));
	mapping_set_error(page->mapping, err);
	set_page_writeback(page);
	do {
		struct buffer_head *next = bh->b_this_page;
		if (buffer_async_write(bh)) {
			clear_buffer_dirty(bh);
			submit_bh(WRITE, bh);
			nr_underway++;
		}
		bh = next;
	} while (bh != head);
	unlock_page(page);
	goto done;
}
/*
 * If a page has any new buffers, zero them out here, and mark them uptodate
 * and dirty so they'll be written out (in order to prevent uninitialised
 * block data from leaking). And clear the new bit.
 */
void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
{
	unsigned int block_start, block_end;
	struct buffer_head *head, *bh;

	BUG_ON(!PageLocked(page));
	if (!page_has_buffers(page))
		return;

	bh = head = page_buffers(page);
	block_start = 0;
	do {
		block_end = block_start + bh->b_size;

		if (buffer_new(bh)) {
			if (block_end > from && block_start < to) {
				if (!PageUptodate(page)) {
					unsigned start, size;

					start = max(from, block_start);
					size = min(to, block_end) - start;

					zero_user_page(page, start, size, KM_USER0);
					set_buffer_uptodate(bh);
				}

				clear_buffer_new(bh);
				mark_buffer_dirty(bh);
			}
		}

		block_start = block_end;
		bh = bh->b_this_page;
	} while (bh != head);
}
EXPORT_SYMBOL(page_zero_new_buffers);
static int __block_prepare_write(struct inode *inode, struct page *page,
		unsigned from, unsigned to, get_block_t *get_block)
{
	unsigned block_start, block_end;
	sector_t block;
	int err = 0;
	unsigned blocksize, bbits;
	struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;

	BUG_ON(!PageLocked(page));
	BUG_ON(from > PAGE_CACHE_SIZE);
	BUG_ON(to > PAGE_CACHE_SIZE);
	BUG_ON(from > to);

	blocksize = 1 << inode->i_blkbits;
	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);
	head = page_buffers(page);

	bbits = inode->i_blkbits;
	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);

	for(bh = head, block_start = 0; bh != head || !block_start;
	    block++, block_start=block_end, bh = bh->b_this_page) {
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (PageUptodate(page)) {
				if (!buffer_uptodate(bh))
					set_buffer_uptodate(bh);
			}
			continue;
		}
		if (buffer_new(bh))
			clear_buffer_new(bh);
		if (!buffer_mapped(bh)) {
			WARN_ON(bh->b_size != blocksize);
			err = get_block(inode, block, bh, 1);
			if (err)
				break;
			if (buffer_new(bh)) {
				unmap_underlying_metadata(bh->b_bdev,
							bh->b_blocknr);
				if (PageUptodate(page)) {
					clear_buffer_new(bh);
					set_buffer_uptodate(bh);
					mark_buffer_dirty(bh);
					continue;
				}
				if (block_end > to || block_start < from) {
					void *kaddr;

					kaddr = kmap_atomic(page, KM_USER0);
					if (block_end > to)
						memset(kaddr+to, 0,
							block_end-to);
					if (block_start < from)
						memset(kaddr+block_start,
							0, from-block_start);
					flush_dcache_page(page);
					kunmap_atomic(kaddr, KM_USER0);
				}
				continue;
			}
		}
		if (PageUptodate(page)) {
			if (!buffer_uptodate(bh))
				set_buffer_uptodate(bh);
			continue;
		}
		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
		    !buffer_unwritten(bh) &&
		     (block_start < from || block_end > to)) {
			ll_rw_block(READ, 1, &bh);
			*wait_bh++=bh;
		}
	}
	/*
	 * If we issued read requests - let them complete.
	 */
	while(wait_bh > wait) {
		wait_on_buffer(*--wait_bh);
		if (!buffer_uptodate(*wait_bh))
			err = -EIO;
	}
	if (unlikely(err))
		page_zero_new_buffers(page, from, to);
	return err;
}
static int __block_commit_write(struct inode *inode, struct page *page,
		unsigned from, unsigned to)
{
	unsigned block_start, block_end;
	int partial = 0;
	unsigned blocksize;
	struct buffer_head *bh, *head;

	blocksize = 1 << inode->i_blkbits;

	for(bh = head = page_buffers(page), block_start = 0;
	    bh != head || !block_start;
	    block_start=block_end, bh = bh->b_this_page) {
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (!buffer_uptodate(bh))
				partial = 1;
		} else {
			set_buffer_uptodate(bh);
			mark_buffer_dirty(bh);
		}
		clear_buffer_new(bh);
	}

	/*
	 * If this is a partial write which happened to make all buffers
	 * uptodate then we can optimize away a bogus readpage() for
	 * the next read(). Here we 'discover' whether the page went
	 * uptodate as a result of this (potentially partial) write.
	 */
	if (!partial)
		SetPageUptodate(page);
	return 0;
}
/*
 * block_write_begin takes care of the basic task of block allocation and
 * bringing partial write blocks uptodate first.
 *
 * If *pagep is not NULL, then block_write_begin uses the locked page
 * at *pagep rather than allocating its own. In this case, the page will
 * not be unlocked or deallocated on failure.
 */
int block_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata,
			get_block_t *get_block)
{
	struct inode *inode = mapping->host;
	int status = 0;
	struct page *page;
	pgoff_t index;
	unsigned start, end;
	int ownpage = 0;

	index = pos >> PAGE_CACHE_SHIFT;
	start = pos & (PAGE_CACHE_SIZE - 1);
	end = start + len;

	page = *pagep;
	if (page == NULL) {
		ownpage = 1;
		page = __grab_cache_page(mapping, index);
		if (!page) {
			status = -ENOMEM;
			goto out;
		}
		*pagep = page;
	} else
		BUG_ON(!PageLocked(page));

	status = __block_prepare_write(inode, page, start, end, get_block);
	if (unlikely(status)) {
		ClearPageUptodate(page);

		if (ownpage) {
			unlock_page(page);
			page_cache_release(page);
			*pagep = NULL;

			/*
			 * prepare_write() may have instantiated a few blocks
			 * outside i_size.  Trim these off again. Don't need
			 * i_size_read because we hold i_mutex.
			 */
			if (pos + len > inode->i_size)
				vmtruncate(inode, inode->i_size);
		}
		goto out;
	}

out:
	return status;
}
EXPORT_SYMBOL(block_write_begin);
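
/*
 * Illustration only (not part of the original file): a minimal sketch of how
 * a filesystem's ->write_begin can delegate to block_write_begin().
 * "myfs_write_begin" and "myfs_get_block" are hypothetical names; real
 * filesystems of this era (ext2, for instance) follow essentially this
 * pattern.
 */
#if 0
static int myfs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	*pagep = NULL;	/* let block_write_begin allocate and lock the page */
	return block_write_begin(file, mapping, pos, len, flags,
					pagep, fsdata, myfs_get_block);
}
#endif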
int block_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	unsigned start;

	start = pos & (PAGE_CACHE_SIZE - 1);

	if (unlikely(copied < len)) {
		/*
		 * The buffers that were written will now be uptodate, so we
		 * don't have to worry about a readpage reading them and
		 * overwriting a partial write. However if we have encountered
		 * a short write and only partially written into a buffer, it
		 * will not be marked uptodate, so a readpage might come in and
		 * destroy our partial write.
		 *
		 * Do the simplest thing, and just treat any short write to a
		 * non uptodate page as a zero-length write, and force the
		 * caller to redo the whole thing.
		 */
		if (!PageUptodate(page))
			copied = 0;

		page_zero_new_buffers(page, start+copied, start+len);
	}
	flush_dcache_page(page);

	/* This could be a short (even 0-length) commit */
	__block_commit_write(inode, page, start, start+copied);

	return copied;
}
EXPORT_SYMBOL(block_write_end);
int generic_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;

	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);

	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold i_mutex.
	 *
	 * But it's important to update i_size while still holding page lock:
	 * page writeout could otherwise come in and zero beyond i_size.
	 */
	if (pos+copied > inode->i_size) {
		i_size_write(inode, pos+copied);
		mark_inode_dirty(inode);
	}

	unlock_page(page);
	page_cache_release(page);

	return copied;
}
EXPORT_SYMBOL(generic_write_end);
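
/*
 * Illustration only: a minimal sketch of how the helpers above pair up in a
 * filesystem's address_space_operations.  myfs_write_begin is the
 * hypothetical wrapper sketched after block_write_begin();
 * generic_write_end() can serve as ->write_end directly when no
 * filesystem-specific commit work is needed.
 */
#if 0
static const struct address_space_operations myfs_aops = {
	.write_begin	= myfs_write_begin,
	.write_end	= generic_write_end,
};
#endif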
/*
 * Generic "read page" function for block devices that have the normal
 * get_block functionality. This is most of the block device filesystems.
 * Reads the page asynchronously --- the unlock_buffer() and
 * set/clear_buffer_uptodate() functions propagate buffer state into the
 * page struct once IO has completed.
 */
int block_read_full_page(struct page *page, get_block_t *get_block)
{
	struct inode *inode = page->mapping->host;
	sector_t iblock, lblock;
	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
	unsigned int blocksize;
	int nr, i;
	int fully_mapped = 1;

	BUG_ON(!PageLocked(page));
	blocksize = 1 << inode->i_blkbits;
	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);
	head = page_buffers(page);

	iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
	lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
	bh = head;
	nr = 0;
	i = 0;

	do {
		if (buffer_uptodate(bh))
			continue;

		if (!buffer_mapped(bh)) {
			int err = 0;

			fully_mapped = 0;
			if (iblock < lblock) {
				WARN_ON(bh->b_size != blocksize);
				err = get_block(inode, iblock, bh, 0);
				if (err)
					SetPageError(page);
			}
			if (!buffer_mapped(bh)) {
				zero_user_page(page, i * blocksize, blocksize,
						KM_USER0);
				if (!err)
					set_buffer_uptodate(bh);
				continue;
			}
			/*
			 * get_block() might have updated the buffer
			 * synchronously
			 */
			if (buffer_uptodate(bh))
				continue;
		}
		arr[nr++] = bh;
	} while (i++, iblock++, (bh = bh->b_this_page) != head);

	if (fully_mapped)
		SetPageMappedToDisk(page);

	if (!nr) {
		/*
		 * All buffers are uptodate - we can set the page uptodate
		 * as well. But not if get_block() returned an error.
		 */
		if (!PageError(page))
			SetPageUptodate(page);
		unlock_page(page);
		return 0;
	}

	/* Stage two: lock the buffers */
	for (i = 0; i < nr; i++) {
		bh = arr[i];
		lock_buffer(bh);
		mark_buffer_async_read(bh);
	}

	/*
	 * Stage 3: start the IO.  Check for uptodateness
	 * inside the buffer lock in case another process reading
	 * the underlying blockdev brought it uptodate (the sct fix).
	 */
	for (i = 0; i < nr; i++) {
		bh = arr[i];
		if (buffer_uptodate(bh))
			end_buffer_async_read(bh, 1);
		else
			submit_bh(READ, bh);
	}
	return 0;
}
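
/*
 * Illustration only: a typical ->readpage simply forwards to
 * block_read_full_page() with the filesystem's block mapper.
 * myfs_readpage and myfs_get_block are hypothetical names.
 */
#if 0
static int myfs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, myfs_get_block);
}
#endif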
/* utility function for filesystems that need to do work on expanding
 * truncates.  Uses filesystem pagecache writes to allow the filesystem to
 * deal with the hole.
 */
int generic_cont_expand_simple(struct inode *inode, loff_t size)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	void *fsdata;
	unsigned long limit;
	int err;

	err = -EFBIG;
	limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
	if (limit != RLIM_INFINITY && size > (loff_t)limit) {
		send_sig(SIGXFSZ, current, 0);
		goto out;
	}
	if (size > inode->i_sb->s_maxbytes)
		goto out;

	err = pagecache_write_begin(NULL, mapping, size, 0,
				AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
				&page, &fsdata);
	if (err)
		goto out;

	err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
	BUG_ON(err > 0);

out:
	return err;
}
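
/*
 * Illustration only: a sketch of an expanding-truncate path in a
 * filesystem's ->setattr, assuming the usual i_mutex locking rules.
 * myfs_setattr_size is hypothetical.  The helper zero-fills through
 * pagecache writes, so the filesystem's own ->write_begin/->write_end
 * handle block allocation in the hole.
 */
#if 0
static int myfs_setattr_size(struct inode *inode, loff_t newsize)
{
	if (newsize > inode->i_size)	/* growing: delegate to the helper */
		return generic_cont_expand_simple(inode, newsize);
	return vmtruncate(inode, newsize);	/* shrinking */
}
#endif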
int cont_expand_zero(struct file *file, struct address_space *mapping,
			loff_t pos, loff_t *bytes)
{
	struct inode *inode = mapping->host;
	unsigned blocksize = 1 << inode->i_blkbits;
	struct page *page;
	void *fsdata;
	pgoff_t index, curidx;
	loff_t curpos;
	unsigned zerofrom, offset, len;
	int err = 0;

	index = pos >> PAGE_CACHE_SHIFT;
	offset = pos & ~PAGE_CACHE_MASK;

	while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
		zerofrom = curpos & ~PAGE_CACHE_MASK;
		if (zerofrom & (blocksize-1)) {
			*bytes |= (blocksize-1);
			(*bytes)++;
		}
		len = PAGE_CACHE_SIZE - zerofrom;

		err = pagecache_write_begin(file, mapping, curpos, len,
						AOP_FLAG_UNINTERRUPTIBLE,
						&page, &fsdata);
		if (err)
			goto out;
		zero_user_page(page, zerofrom, len, KM_USER0);
		err = pagecache_write_end(file, mapping, curpos, len, len,
						page, fsdata);
		if (err < 0)
			goto out;
		BUG_ON(err != len);
		err = 0;
	}

	/* page covers the boundary, find the boundary offset */
	if (index == curidx) {
		zerofrom = curpos & ~PAGE_CACHE_MASK;
		/* if we are going to expand the file, the last block will be filled */
		if (offset <= zerofrom) {
			goto out;
		}
		if (zerofrom & (blocksize-1)) {
			*bytes |= (blocksize-1);
			(*bytes)++;
		}
		len = offset - zerofrom;

		err = pagecache_write_begin(file, mapping, curpos, len,
						AOP_FLAG_UNINTERRUPTIBLE,
						&page, &fsdata);
		if (err)
			goto out;
		zero_user_page(page, zerofrom, len, KM_USER0);
		err = pagecache_write_end(file, mapping, curpos, len, len,
						page, fsdata);
		if (err < 0)
			goto out;
		BUG_ON(err != len);
		err = 0;
	}
out:
	return err;
}
/*
 * For moronic filesystems that do not allow holes in files.
 * We may have to extend the file.
 */
int cont_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata,
			get_block_t *get_block, loff_t *bytes)
{
	struct inode *inode = mapping->host;
	unsigned blocksize = 1 << inode->i_blkbits;
	unsigned zerofrom;
	int err;

	err = cont_expand_zero(file, mapping, pos, bytes);
	if (err)
		goto out;

	zerofrom = *bytes & ~PAGE_CACHE_MASK;
	if (pos+len > *bytes && zerofrom & (blocksize-1)) {
		*bytes |= (blocksize-1);
		(*bytes)++;
	}

	*pagep = NULL;
	err = block_write_begin(file, mapping, pos, len,
				flags, pagep, fsdata, get_block);
out:
	return err;
}
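
/*
 * Illustration only: a hole-less filesystem (FAT is the classic user of
 * this helper) passes a pointer to its "bytes allocated so far" counter so
 * that cont_write_begin() can zero-fill up to the write position first.
 * myfs_cont_write_begin, myfs_get_block, MYFS_I and the alloced_bytes
 * counter are all hypothetical.
 */
#if 0
static int myfs_cont_write_begin(struct file *file,
			struct address_space *mapping, loff_t pos,
			unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	return cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
			myfs_get_block, &MYFS_I(mapping->host)->alloced_bytes);
}
#endif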
int block_prepare_write(struct page *page, unsigned from, unsigned to,
			get_block_t *get_block)
{
	struct inode *inode = page->mapping->host;
	int err = __block_prepare_write(inode, page, from, to, get_block);
	if (err)
		ClearPageUptodate(page);
	return err;
}

int block_commit_write(struct page *page, unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;
	__block_commit_write(inode, page, from, to);
	return 0;
}

int generic_commit_write(struct file *file, struct page *page,
		unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;
	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
	__block_commit_write(inode, page, from, to);
	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold i_mutex.
	 */
	if (pos > inode->i_size) {
		i_size_write(inode, pos);
		mark_inode_dirty(inode);
	}
	return 0;
}
/*
 * block_page_mkwrite() is not allowed to change the file size as it gets
 * called from a page fault handler when a page is first dirtied. Hence we must
 * be careful to check for EOF conditions here. We set the page up correctly
 * for a written page which means we get ENOSPC checking when writing into
 * holes and correct delalloc and unwritten extent mapping on filesystems that
 * support these features.
 *
 * We are not allowed to take the i_mutex here so we have to play games to
 * protect against truncate races as the page could now be beyond EOF.  Because
 * vmtruncate() writes the inode size before removing pages, once we have the
 * page lock we can determine safely if the page is beyond EOF. If it is not
 * beyond EOF, then the page is guaranteed safe against truncation until we
 * unlock the page.
 */
int
block_page_mkwrite(struct vm_area_struct *vma, struct page *page,
		   get_block_t get_block)
{
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	unsigned long end;
	loff_t size;
	int ret = -EINVAL;

	lock_page(page);
	size = i_size_read(inode);
	if ((page->mapping != inode->i_mapping) ||
	    (page_offset(page) > size)) {
		/* page got truncated out from underneath us */
		goto out_unlock;
	}

	/* page is wholly or partially inside EOF */
	if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
		end = size & ~PAGE_CACHE_MASK;
	else
		end = PAGE_CACHE_SIZE;

	ret = block_prepare_write(page, 0, end, get_block);
	if (!ret)
		ret = block_commit_write(page, 0, end);

out_unlock:
	unlock_page(page);
	return ret;
}
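
/*
 * Illustration only: hooking block_page_mkwrite() up as a ->page_mkwrite
 * handler so writable mappings get blocks allocated (and ENOSPC reported)
 * at fault time.  myfs_page_mkwrite, myfs_file_vm_ops and myfs_get_block
 * are hypothetical.
 */
#if 0
static int myfs_page_mkwrite(struct vm_area_struct *vma, struct page *page)
{
	return block_page_mkwrite(vma, page, myfs_get_block);
}

static struct vm_operations_struct myfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= myfs_page_mkwrite,
};
#endif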
/*
 * nobh_write_begin()'s prereads are special: the buffer_heads are freed
 * immediately, while under the page lock.  So it needs a special end_io
 * handler which does not touch the bh after unlocking it.
 */
static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
{
	__end_buffer_read_notouch(bh, uptodate);
}

/*
 * Attach the singly-linked list of buffers created by nobh_write_begin, to
 * the page (converting it to circular linked list and taking care of page
 * dirty races).
 */
static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
{
	struct buffer_head *bh;

	BUG_ON(!PageLocked(page));

	spin_lock(&page->mapping->private_lock);
	bh = head;
	do {
		if (PageDirty(page))
			set_buffer_dirty(bh);
		if (!bh->b_this_page)
			bh->b_this_page = head;
		bh = bh->b_this_page;
	} while (bh != head);
	attach_page_buffers(page, head);
	spin_unlock(&page->mapping->private_lock);
}
/*
 * On entry, the page is fully not uptodate.
 * On exit the page is fully uptodate in the areas outside (from,to)
 */
int nobh_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata,
			get_block_t *get_block)
{
	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocksize = 1 << blkbits;
	struct buffer_head *head, *bh;
	struct page *page;
	pgoff_t index;
	unsigned from, to;
	unsigned block_in_page;
	unsigned block_start, block_end;
	sector_t block_in_file;
	char *kaddr;
	int nr_reads = 0;
	int ret = 0;
	int is_mapped_to_disk = 1;

	index = pos >> PAGE_CACHE_SHIFT;
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

	page = __grab_cache_page(mapping, index);
	if (!page)
		return -ENOMEM;
	*pagep = page;
	*fsdata = NULL;

	if (page_has_buffers(page)) {
		unlock_page(page);
		page_cache_release(page);
		*pagep = NULL;
		return block_write_begin(file, mapping, pos, len, flags, pagep,
					fsdata, get_block);
	}

	if (PageMappedToDisk(page))
		return 0;

	/*
	 * Allocate buffers so that we can keep track of state, and potentially
	 * attach them to the page if an error occurs. In the common case of
	 * no error, they will just be freed again without ever being attached
	 * to the page (which is all OK, because we're under the page lock).
	 *
	 * Be careful: the buffer linked list is a NULL terminated one, rather
	 * than the circular one we're used to.
	 */
	head = alloc_page_buffers(page, blocksize, 0);
	if (!head) {
		ret = -ENOMEM;
		goto out_release;
	}

	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);

	/*
	 * We loop across all blocks in the page, whether or not they are
	 * part of the affected region.  This is so we can discover if the
	 * page is fully mapped-to-disk.
	 */
	for (block_start = 0, block_in_page = 0, bh = head;
		  block_start < PAGE_CACHE_SIZE;
		  block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
		int create;

		block_end = block_start + blocksize;
		bh->b_state = 0;
		create = 1;
		if (block_start >= to)
			create = 0;
		ret = get_block(inode, block_in_file + block_in_page,
					bh, create);
		if (ret)
			goto failed;
		if (!buffer_mapped(bh))
			is_mapped_to_disk = 0;
		if (buffer_new(bh))
			unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
		if (PageUptodate(page)) {
			set_buffer_uptodate(bh);
			continue;
		}
		if (buffer_new(bh) || !buffer_mapped(bh)) {
			kaddr = kmap_atomic(page, KM_USER0);
			if (block_start < from)
				memset(kaddr+block_start, 0, from-block_start);
			if (block_end > to)
				memset(kaddr + to, 0, block_end - to);
			flush_dcache_page(page);
			kunmap_atomic(kaddr, KM_USER0);
			continue;
		}
		if (buffer_uptodate(bh))
			continue;	/* reiserfs does this */
		if (block_start < from || block_end > to) {
			lock_buffer(bh);
			bh->b_end_io = end_buffer_read_nobh;
			submit_bh(READ, bh);
			nr_reads++;
		}
	}

	if (nr_reads) {
		/*
		 * The page is locked, so these buffers are protected from
		 * any VM or truncate activity.  Hence we don't need to care
		 * for the buffer_head refcounts.
		 */
		for (bh = head; bh; bh = bh->b_this_page) {
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				ret = -EIO;
		}
		if (ret)
			goto failed;
	}

	if (is_mapped_to_disk)
		SetPageMappedToDisk(page);

	*fsdata = head; /* to be released by nobh_write_end */

	return 0;

failed:
	BUG_ON(!ret);
	/*
	 * Error recovery is a bit difficult. We need to zero out blocks that
	 * were newly allocated, and dirty them to ensure they get written out.
	 * Buffers need to be attached to the page at this point, otherwise
	 * the handling of potential IO errors during writeout would be hard
	 * (could try doing synchronous writeout, but what if that fails too?)
	 */
	attach_nobh_buffers(page, head);
	page_zero_new_buffers(page, from, to);

out_release:
	unlock_page(page);
	page_cache_release(page);
	*pagep = NULL;

	if (pos + len > inode->i_size)
		vmtruncate(inode, inode->i_size);

	return ret;
}
EXPORT_SYMBOL(nobh_write_begin);
int nobh_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;
	struct buffer_head *head = fsdata;	/* list from nobh_write_begin */
	struct buffer_head *bh;

	if (!PageMappedToDisk(page)) {
		if (unlikely(copied < len) && !page_has_buffers(page))
			attach_nobh_buffers(page, head);
		if (page_has_buffers(page))
			return generic_write_end(file, mapping, pos, len,
						copied, page, fsdata);
	}

	SetPageUptodate(page);
	set_page_dirty(page);
	if (pos+copied > inode->i_size) {
		i_size_write(inode, pos+copied);
		mark_inode_dirty(inode);
	}

	unlock_page(page);
	page_cache_release(page);

	while (head) {
		bh = head;
		head = head->b_this_page;
		free_buffer_head(bh);
	}

	return copied;
}
EXPORT_SYMBOL(nobh_write_end);
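
/*
 * Illustration only: wiring the nobh variants into address_space_operations
 * for a filesystem that avoids keeping buffer_heads attached to data pages.
 * myfs_get_block, myfs_readpage and myfs_nobh_writepage are hypothetical.
 */
#if 0
static int myfs_nobh_write_begin(struct file *file,
			struct address_space *mapping, loff_t pos,
			unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	return nobh_write_begin(file, mapping, pos, len, flags, pagep,
				fsdata, myfs_get_block);
}

static const struct address_space_operations myfs_nobh_aops = {
	.readpage	= myfs_readpage,
	.writepage	= myfs_nobh_writepage,
	.write_begin	= myfs_nobh_write_begin,
	.write_end	= nobh_write_end,
};
#endif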
/*
 * nobh_writepage() - based on block_write_full_page() except
 * that it tries to operate without attaching bufferheads to
 * the page.
 */
int nobh_writepage(struct page *page, get_block_t *get_block,
			struct writeback_control *wbc)
{
	struct inode * const inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	unsigned offset;
	int ret;

	/* Is the page fully inside i_size? */
	if (page->index < end_index)
		goto out;

	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_CACHE_SIZE-1);
	if (page->index >= end_index+1 || !offset) {
		/*
		 * The page may have dirty, unmapped buffers.  For example,
		 * they may have been added in ext3_writepage().  Make them
		 * freeable here, so the page does not leak.
		 */
#if 0
		/* Not really sure about this  - do we need this ? */
		if (page->mapping->a_ops->invalidatepage)
			page->mapping->a_ops->invalidatepage(page, offset);
#endif
		unlock_page(page);
		return 0; /* don't care */
	}

	/*
	 * The page straddles i_size.  It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
	 * in multiples of the page size.  For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0);
out:
	ret = mpage_writepage(page, get_block, wbc);
	if (ret == -EAGAIN)
		ret = __block_write_full_page(inode, page, get_block, wbc);
	return ret;
}
EXPORT_SYMBOL(nobh_writepage);
int nobh_truncate_page(struct address_space *mapping,
			loff_t from, get_block_t *get_block)
{
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	unsigned blocksize;
	sector_t iblock;
	unsigned length, pos;
	struct inode *inode = mapping->host;
	struct page *page;
	struct buffer_head map_bh;
	int err;

	blocksize = 1 << inode->i_blkbits;
	length = offset & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;

	length = blocksize - length;
	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);

	page = grab_cache_page(mapping, index);
	err = -ENOMEM;
	if (!page)
		goto out;

	if (page_has_buffers(page)) {
has_buffers:
		unlock_page(page);
		page_cache_release(page);
		return block_truncate_page(mapping, from, get_block);
	}

	/* Find the buffer that contains "offset" */
	pos = blocksize;
	while (offset >= pos) {
		iblock++;
		pos += blocksize;
	}

	/* map_bh is on-stack: its state must be cleared before use */
	map_bh.b_size = blocksize;
	map_bh.b_state = 0;
	err = get_block(inode, iblock, &map_bh, 0);
	if (err)
		goto unlock;
	/* unmapped? It's a hole - nothing to do */
	if (!buffer_mapped(&map_bh))
		goto unlock;

	/* Ok, it's mapped. Make sure it's up-to-date */
	if (!PageUptodate(page)) {
		err = mapping->a_ops->readpage(NULL, page);
		if (err) {
			page_cache_release(page);
			goto out;
		}
		lock_page(page);
		if (!PageUptodate(page)) {
			err = -EIO;
			goto unlock;
		}
		if (page_has_buffers(page))
			goto has_buffers;
	}
	zero_user_page(page, offset, length, KM_USER0);
	set_page_dirty(page);
	err = 0;

unlock:
	unlock_page(page);
	page_cache_release(page);
out:
	return err;
}
EXPORT_SYMBOL(nobh_truncate_page);
int block_truncate_page(struct address_space *mapping,
			loff_t from, get_block_t *get_block)
{
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	unsigned blocksize;
	sector_t iblock;
	unsigned length, pos;
	struct inode *inode = mapping->host;
	struct page *page;
	struct buffer_head *bh;
	int err;

	blocksize = 1 << inode->i_blkbits;
	length = offset & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;

	length = blocksize - length;
	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);

	page = grab_cache_page(mapping, index);
	err = -ENOMEM;
	if (!page)
		goto out;

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	/* Find the buffer that contains "offset" */
	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	err = 0;
	if (!buffer_mapped(bh)) {
		WARN_ON(bh->b_size != blocksize);
		err = get_block(inode, iblock, bh, 0);
		if (err)
			goto unlock;
		/* unmapped? It's a hole - nothing to do */
		if (!buffer_mapped(bh))
			goto unlock;
	}

	/* Ok, it's mapped. Make sure it's up-to-date */
	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
		err = -EIO;
		ll_rw_block(READ, 1, &bh);
		wait_on_buffer(bh);
		/* Uhhuh. Read error. Complain and punt. */
		if (!buffer_uptodate(bh))
			goto unlock;
	}

	zero_user_page(page, offset, length, KM_USER0);
	mark_buffer_dirty(bh);
	err = 0;

unlock:
	unlock_page(page);
	page_cache_release(page);
out:
	return err;
}
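
/*
 * Illustration only: a typical shrinking-truncate path zeroes the tail of
 * the last partial block before releasing blocks, so that a later file
 * extension cannot expose stale data.  myfs_truncate, myfs_get_block and
 * myfs_free_blocks_past_eof are hypothetical.
 */
#if 0
static void myfs_truncate(struct inode *inode)
{
	block_truncate_page(inode->i_mapping, inode->i_size, myfs_get_block);
	myfs_free_blocks_past_eof(inode);	/* reclaim blocks beyond i_size */
}
#endif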
/*
 * The generic ->writepage function for buffer-backed address_spaces
 */
int block_write_full_page(struct page *page, get_block_t *get_block,
			struct writeback_control *wbc)
{
	struct inode * const inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	unsigned offset;

	/* Is the page fully inside i_size? */
	if (page->index < end_index)
		return __block_write_full_page(inode, page, get_block, wbc);

	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_CACHE_SIZE-1);
	if (page->index >= end_index+1 || !offset) {
		/*
		 * The page may have dirty, unmapped buffers.  For example,
		 * they may have been added in ext3_writepage().  Make them
		 * freeable here, so the page does not leak.
		 */
		do_invalidatepage(page, 0);
		unlock_page(page);
		return 0; /* don't care */
	}

	/*
	 * The page straddles i_size.  It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
	 * in multiples of the page size.  For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0);
	return __block_write_full_page(inode, page, get_block, wbc);
}
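
/*
 * Illustration only: the matching ->writepage wrapper.  myfs_writepage and
 * myfs_get_block are hypothetical.
 */
#if 0
static int myfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, myfs_get_block, wbc);
}
#endif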
sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
			    get_block_t *get_block)
{
	struct buffer_head tmp;
	struct inode *inode = mapping->host;
	tmp.b_state = 0;
	tmp.b_blocknr = 0;
	tmp.b_size = 1 << inode->i_blkbits;
	get_block(inode, block, &tmp, 0);
	return tmp.b_blocknr;
}
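
/*
 * Illustration only: ->bmap (the FIBMAP ioctl backend) is usually just this
 * one-liner.  myfs_bmap and myfs_get_block are hypothetical.
 */
#if 0
static sector_t myfs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, myfs_get_block);
}
#endif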
static void end_bio_bh_io_sync(struct bio *bio, int err)
{
	struct buffer_head *bh = bio->bi_private;

	if (err == -EOPNOTSUPP) {
		set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
		set_bit(BH_Eopnotsupp, &bh->b_state);
	}

	bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
	bio_put(bio);
}
int submit_bh(int rw, struct buffer_head * bh)
{
	struct bio *bio;
	int ret = 0;

	BUG_ON(!buffer_locked(bh));
	BUG_ON(!buffer_mapped(bh));
	BUG_ON(!bh->b_end_io);

	if (buffer_ordered(bh) && (rw == WRITE))
		rw = WRITE_BARRIER;

	/*
	 * Only clear out a write error when rewriting, should this
	 * include WRITE_SYNC as well?
	 */
	if (test_set_buffer_req(bh) && (rw == WRITE || rw == WRITE_BARRIER))
		clear_buffer_write_io_error(bh);

	/*
	 * from here on down, it's all bio -- do the initial mapping,
	 * submit_bio -> generic_make_request may further map this bio around
	 */
	bio = bio_alloc(GFP_NOIO, 1);

	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	bio->bi_io_vec[0].bv_page = bh->b_page;
	bio->bi_io_vec[0].bv_len = bh->b_size;
	bio->bi_io_vec[0].bv_offset = bh_offset(bh);

	bio->bi_vcnt = 1;
	bio->bi_idx = 0;
	bio->bi_size = bh->b_size;

	bio->bi_end_io = end_bio_bh_io_sync;
	bio->bi_private = bh;

	bio_get(bio);
	submit_bio(rw, bio);

	if (bio_flagged(bio, BIO_EOPNOTSUPP))
		ret = -EOPNOTSUPP;

	bio_put(bio);
	return ret;
}
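
/*
 * Illustration only: a minimal synchronous read of one mapped buffer via
 * submit_bh().  read_bh_sync is hypothetical; the caller is assumed to hold
 * a reference on the bh.  end_buffer_read_sync() unlocks the buffer and
 * drops the extra reference taken here when the I/O completes.
 */
#if 0
static int read_bh_sync(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {	/* raced: already valid */
		unlock_buffer(bh);
		return 0;
	}
	get_bh(bh);
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(READ, bh);
	wait_on_buffer(bh);
	return buffer_uptodate(bh) ? 0 : -EIO;
}
#endif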
/**
 * ll_rw_block: low-level access to block devices (DEPRECATED)
 * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
 * @nr: number of &struct buffer_heads in the array
 * @bhs: array of pointers to &struct buffer_head
 *
 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
 * requests an I/O operation on them, either a %READ or a %WRITE.  The third
 * option, %SWRITE, is like %WRITE except that it ensures the *current* data
 * in the buffers is sent to disk.  The fourth %READA option is described in
 * the documentation for generic_make_request(), which ll_rw_block() calls.
 *
 * This function drops any buffer that it cannot get a lock on (with the
 * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
 * clean when doing a write request, and any buffer that appears to be
 * up-to-date when doing a read request.  Further, it marks as clean buffers
 * that are processed for writing (the buffer cache won't assume that they
 * are actually clean until the buffer gets unlocked).
 *
 * ll_rw_block sets b_end_io to a simple completion handler that marks
 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
 * any waiters.
 *
 * All of the buffers must be for the same device, and must also be a
 * multiple of the current approved size for the device.
 */
void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
{
	int i;

	for (i = 0; i < nr; i++) {
		struct buffer_head *bh = bhs[i];

		if (rw == SWRITE)
			lock_buffer(bh);
		else if (test_set_buffer_locked(bh))
			continue;

		if (rw == WRITE || rw == SWRITE) {
			if (test_clear_buffer_dirty(bh)) {
				bh->b_end_io = end_buffer_write_sync;
				get_bh(bh);
				submit_bh(WRITE, bh);
				continue;
			}
		} else {
			if (!buffer_uptodate(bh)) {
				bh->b_end_io = end_buffer_read_sync;
				get_bh(bh);
				submit_bh(rw, bh);
				continue;
			}
		}
		unlock_buffer(bh);
	}
}
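
/*
 * Illustration only: the classic submit-then-wait pattern built on
 * ll_rw_block().  read_buffers_sync is hypothetical; bhs[] is assumed to
 * hold mapped, referenced buffers for the same device.
 */
#if 0
static int read_buffers_sync(struct buffer_head *bhs[], int nr)
{
	int i;

	ll_rw_block(READ, nr, bhs);	/* skips locked/uptodate buffers */
	for (i = 0; i < nr; i++) {
		wait_on_buffer(bhs[i]);
		if (!buffer_uptodate(bhs[i]))
			return -EIO;
	}
	return 0;
}
#endif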
/*
 * For a data-integrity writeout, we need to wait upon any in-progress I/O
 * and then start new I/O and then wait upon it.  The caller must have a ref on
 * the buffer_head.
 */
int sync_dirty_buffer(struct buffer_head *bh)
{
	int ret = 0;

	WARN_ON(atomic_read(&bh->b_count) < 1);
	lock_buffer(bh);
	if (test_clear_buffer_dirty(bh)) {
		get_bh(bh);
		bh->b_end_io = end_buffer_write_sync;
		ret = submit_bh(WRITE, bh);
		wait_on_buffer(bh);
		if (buffer_eopnotsupp(bh)) {
			clear_buffer_eopnotsupp(bh);
			ret = -EOPNOTSUPP;
		}
		if (!ret && !buffer_uptodate(bh))
			ret = -EIO;
	} else {
		unlock_buffer(bh);
	}
	return ret;
}
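
/*
 * Illustration only: the usual metadata-update idiom around
 * sync_dirty_buffer().  update_metadata_block is hypothetical; "bh" is
 * assumed to be a referenced buffer obtained with sb_bread().
 */
#if 0
static int update_metadata_block(struct buffer_head *bh, int sync)
{
	/* ... modify bh->b_data under whatever lock the fs requires ... */
	mark_buffer_dirty(bh);
	if (sync)
		return sync_dirty_buffer(bh);	/* waits; -EIO on write error */
	return 0;
}
#endif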
/*
 * try_to_free_buffers() checks if all the buffers on this particular page
 * are unused, and releases them if so.
 *
 * Exclusion against try_to_free_buffers may be obtained by either
 * locking the page or by holding its mapping's private_lock.
 *
 * If the page is dirty but all the buffers are clean then we need to
 * be sure to mark the page clean as well.  This is because the page
 * may be against a block device, and a later reattachment of buffers
 * to a dirty page will set *all* buffers dirty.  Which would corrupt
 * filesystem data on the same device.
 *
 * The same applies to regular filesystem pages: if all the buffers are
 * clean then we set the page clean and proceed.  To do that, we require
 * total exclusion from __set_page_dirty_buffers().  That is obtained with
 * private_lock.
 *
 * try_to_free_buffers() is non-blocking.
 */
static inline int buffer_busy(struct buffer_head *bh)
{
	return atomic_read(&bh->b_count) |
		(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
}

static int
drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
{
	struct buffer_head *head = page_buffers(page);
	struct buffer_head *bh;

	bh = head;
	do {
		if (buffer_write_io_error(bh) && page->mapping)
			set_bit(AS_EIO, &page->mapping->flags);
		if (buffer_busy(bh))
			goto failed;
		bh = bh->b_this_page;
	} while (bh != head);

	do {
		struct buffer_head *next = bh->b_this_page;

		if (!list_empty(&bh->b_assoc_buffers))
			__remove_assoc_queue(bh);
		bh = next;
	} while (bh != head);
	*buffers_to_free = head;
	__clear_page_buffers(page);
	return 1;
failed:
	return 0;
}

int try_to_free_buffers(struct page *page)
{
	struct address_space * const mapping = page->mapping;
	struct buffer_head *buffers_to_free = NULL;
	int ret = 0;

	BUG_ON(!PageLocked(page));
	if (PageWriteback(page))
		return 0;

	if (mapping == NULL) {		/* can this still happen? */
		ret = drop_buffers(page, &buffers_to_free);
		goto out;
	}

	spin_lock(&mapping->private_lock);
	ret = drop_buffers(page, &buffers_to_free);

	/*
	 * If the filesystem writes its buffers by hand (eg ext3)
	 * then we can have clean buffers against a dirty page.  We
	 * clean the page here; otherwise the VM will never notice
	 * that the filesystem did any IO at all.
	 *
	 * Also, during truncate, discard_buffer will have marked all
	 * the page's buffers clean.  We discover that here and clean
	 * the page also.
	 *
	 * private_lock must be held over this entire operation in order
	 * to synchronise against __set_page_dirty_buffers and prevent the
	 * dirty bit from being lost.
	 */
	if (ret)
		cancel_dirty_page(page, PAGE_CACHE_SIZE);
	spin_unlock(&mapping->private_lock);
out:
	if (buffers_to_free) {
		struct buffer_head *bh = buffers_to_free;

		do {
			struct buffer_head *next = bh->b_this_page;
			free_buffer_head(bh);
			bh = next;
		} while (bh != buffers_to_free);
	}
	return ret;
}
EXPORT_SYMBOL(try_to_free_buffers);
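
/*
 * Illustration only: a filesystem with no journalled or otherwise private
 * buffer state can route ->releasepage straight to try_to_free_buffers();
 * filesystems like ext3 must first check for journal references.
 * myfs_releasepage is hypothetical.
 */
#if 0
static int myfs_releasepage(struct page *page, gfp_t gfp_mask)
{
	if (PagePrivate(page))
		return try_to_free_buffers(page);
	return 1;	/* no buffers attached: nothing pins the page */
}
#endif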
void block_sync_page(struct page *page)
{
	struct address_space *mapping;

	smp_mb();
	mapping = page_mapping(page);
	if (mapping)
		blk_run_backing_dev(mapping->backing_dev_info, page);
}
/*
 * There are no bdflush tunables left.  But distributions are
 * still running obsolete flush daemons, so we terminate them here.
 *
 * Use of bdflush() is deprecated and will be removed in a future kernel.
 * The `pdflush' kernel threads fully replace bdflush daemons and this call.
 */
asmlinkage long sys_bdflush(int func, long data)
{
	static int msg_count;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (msg_count < 5) {
		msg_count++;
		printk(KERN_INFO
			"warning: process `%s' used the obsolete bdflush"
			" system call\n", current->comm);
		printk(KERN_INFO "Fix your initscripts?\n");
	}

	if (func == 1)
		do_exit(0);
	return 0;
}
/*
 * Buffer-head allocation
 */
static struct kmem_cache *bh_cachep;

/*
 * Once the number of bh's in the machine exceeds this level, we start
 * stripping them in writeback.
 */
static int max_buffer_heads;

int buffer_heads_over_limit;

struct bh_accounting {
	int nr;			/* Number of live bh's */
	int ratelimit;		/* Limit cacheline bouncing */
};

static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};

static void recalc_bh_state(void)
{
	int i;
	int tot = 0;

	if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
		return;
	__get_cpu_var(bh_accounting).ratelimit = 0;
	for_each_online_cpu(i)
		tot += per_cpu(bh_accounting, i).nr;
	buffer_heads_over_limit = (tot > max_buffer_heads);
}

struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
{
	struct buffer_head *ret = kmem_cache_zalloc(bh_cachep,
				set_migrateflags(gfp_flags, __GFP_RECLAIMABLE));
	if (ret) {
		INIT_LIST_HEAD(&ret->b_assoc_buffers);
		get_cpu_var(bh_accounting).nr++;
		recalc_bh_state();
		put_cpu_var(bh_accounting);
	}
	return ret;
}
EXPORT_SYMBOL(alloc_buffer_head);

void free_buffer_head(struct buffer_head *bh)
{
	BUG_ON(!list_empty(&bh->b_assoc_buffers));
	kmem_cache_free(bh_cachep, bh);
	get_cpu_var(bh_accounting).nr--;
	recalc_bh_state();
	put_cpu_var(bh_accounting);
}
EXPORT_SYMBOL(free_buffer_head);

static void buffer_exit_cpu(int cpu)
{
	int i;
	struct bh_lru *b = &per_cpu(bh_lrus, cpu);

	for (i = 0; i < BH_LRU_SIZE; i++) {
		brelse(b->bhs[i]);
		b->bhs[i] = NULL;
	}
	get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
	per_cpu(bh_accounting, cpu).nr = 0;
	put_cpu_var(bh_accounting);
}

static int buffer_cpu_notify(struct notifier_block *self,
			      unsigned long action, void *hcpu)
{
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
		buffer_exit_cpu((unsigned long)hcpu);
	return NOTIFY_OK;
}

void __init buffer_init(void)
{
	int nrpages;

	bh_cachep = KMEM_CACHE(buffer_head,
			SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);

	/*
	 * Limit the bh occupancy to 10% of ZONE_NORMAL
	 */
	nrpages = (nr_free_buffer_pages() * 10) / 100;
	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
	hotcpu_notifier(buffer_cpu_notify, 0);
}
EXPORT_SYMBOL(__bforget);
EXPORT_SYMBOL(__brelse);
EXPORT_SYMBOL(__wait_on_buffer);
EXPORT_SYMBOL(block_commit_write);
EXPORT_SYMBOL(block_prepare_write);
EXPORT_SYMBOL(block_page_mkwrite);
EXPORT_SYMBOL(block_read_full_page);
EXPORT_SYMBOL(block_sync_page);
EXPORT_SYMBOL(block_truncate_page);
EXPORT_SYMBOL(block_write_full_page);
EXPORT_SYMBOL(cont_write_begin);
EXPORT_SYMBOL(end_buffer_read_sync);
EXPORT_SYMBOL(end_buffer_write_sync);
EXPORT_SYMBOL(file_fsync);
EXPORT_SYMBOL(fsync_bdev);
EXPORT_SYMBOL(generic_block_bmap);
EXPORT_SYMBOL(generic_commit_write);
EXPORT_SYMBOL(generic_cont_expand_simple);
EXPORT_SYMBOL(init_buffer);
EXPORT_SYMBOL(invalidate_bdev);
EXPORT_SYMBOL(ll_rw_block);
EXPORT_SYMBOL(mark_buffer_dirty);
EXPORT_SYMBOL(submit_bh);
EXPORT_SYMBOL(sync_dirty_buffer);
EXPORT_SYMBOL(unlock_buffer);