[PATCH] freeze_bdev() cleanup
[linux-2.6/mini2440.git] / fs / buffer.c
1 /*
2 * linux/fs/buffer.c
4 * Copyright (C) 1991, 1992, 2002 Linus Torvalds
5 */
7 /*
8 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
10 * Removed a lot of unnecessary code and simplified things now that
11 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
13 * Speed up hash, lru, and free list operations. Use gfp() for allocating
14 * hash table, use SLAB cache for buffer heads. SMP threading. -DaveM
16 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
18 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
21 #include <linux/config.h>
22 #include <linux/kernel.h>
23 #include <linux/syscalls.h>
24 #include <linux/fs.h>
25 #include <linux/mm.h>
26 #include <linux/percpu.h>
27 #include <linux/slab.h>
28 #include <linux/smp_lock.h>
29 #include <linux/capability.h>
30 #include <linux/blkdev.h>
31 #include <linux/file.h>
32 #include <linux/quotaops.h>
33 #include <linux/highmem.h>
34 #include <linux/module.h>
35 #include <linux/writeback.h>
36 #include <linux/hash.h>
37 #include <linux/suspend.h>
38 #include <linux/buffer_head.h>
39 #include <linux/bio.h>
40 #include <linux/notifier.h>
41 #include <linux/cpu.h>
42 #include <linux/bitops.h>
43 #include <linux/mpage.h>
44 #include <linux/bit_spinlock.h>
46 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
47 static void invalidate_bh_lrus(void);
49 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
51 inline void
52 init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
54 bh->b_end_io = handler;
55 bh->b_private = private;
58 static int sync_buffer(void *word)
60 struct block_device *bd;
61 struct buffer_head *bh
62 = container_of(word, struct buffer_head, b_state);
64 smp_mb();
65 bd = bh->b_bdev;
66 if (bd)
67 blk_run_address_space(bd->bd_inode->i_mapping);
68 io_schedule();
69 return 0;
72 void fastcall __lock_buffer(struct buffer_head *bh)
74 wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
75 TASK_UNINTERRUPTIBLE);
77 EXPORT_SYMBOL(__lock_buffer);
79 void fastcall unlock_buffer(struct buffer_head *bh)
81 clear_buffer_locked(bh);
82 smp_mb__after_clear_bit();
83 wake_up_bit(&bh->b_state, BH_Lock);
87 * Block until a buffer comes unlocked. This doesn't stop it
88 * from becoming locked again - you have to lock it yourself
89 * if you want to preserve its state.
91 void __wait_on_buffer(struct buffer_head * bh)
93 wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
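/*
 * Illustrative sketch (not from the original source): __wait_on_buffer()
 * does not pin the unlocked state, so a caller that needs the buffer to
 * stay locked must take the lock itself:
 *
 *	lock_buffer(bh);
 *	... inspect or modify the buffer with BH_Lock held ...
 *	unlock_buffer(bh);
 *
 * wait_on_buffer(bh) alone only guarantees the buffer was unlocked at
 * some instant, not that it still is.
 */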
96 static void
97 __clear_page_buffers(struct page *page)
99 ClearPagePrivate(page);
100 set_page_private(page, 0);
101 page_cache_release(page);
104 static void buffer_io_error(struct buffer_head *bh)
106 char b[BDEVNAME_SIZE];
108 printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
109 bdevname(bh->b_bdev, b),
110 (unsigned long long)bh->b_blocknr);
114 * Default synchronous end-of-IO handler.. Just mark it up-to-date and
115 * unlock the buffer. This is what ll_rw_block uses too.
117 void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
119 if (uptodate) {
120 set_buffer_uptodate(bh);
121 } else {
122 /* This happens, due to failed READA attempts. */
123 clear_buffer_uptodate(bh);
125 unlock_buffer(bh);
126 put_bh(bh);
129 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
131 char b[BDEVNAME_SIZE];
133 if (uptodate) {
134 set_buffer_uptodate(bh);
135 } else {
136 if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
137 buffer_io_error(bh);
138 printk(KERN_WARNING "lost page write due to "
139 "I/O error on %s\n",
140 bdevname(bh->b_bdev, b));
142 set_buffer_write_io_error(bh);
143 clear_buffer_uptodate(bh);
145 unlock_buffer(bh);
146 put_bh(bh);
150 * Write out and wait upon all the dirty data associated with a block
151 * device via its mapping. Does not take the superblock lock.
153 int sync_blockdev(struct block_device *bdev)
155 int ret = 0;
157 if (bdev)
158 ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
159 return ret;
161 EXPORT_SYMBOL(sync_blockdev);
163 static void __fsync_super(struct super_block *sb)
165 sync_inodes_sb(sb, 0);
166 DQUOT_SYNC(sb);
167 lock_super(sb);
168 if (sb->s_dirt && sb->s_op->write_super)
169 sb->s_op->write_super(sb);
170 unlock_super(sb);
171 if (sb->s_op->sync_fs)
172 sb->s_op->sync_fs(sb, 1);
173 sync_blockdev(sb->s_bdev);
174 sync_inodes_sb(sb, 1);
178 * Write out and wait upon all dirty data associated with this
179 * superblock. Filesystem data as well as the underlying block
180 * device. Takes the superblock lock.
182 int fsync_super(struct super_block *sb)
184 __fsync_super(sb);
185 return sync_blockdev(sb->s_bdev);
189 * Write out and wait upon all dirty data associated with this
190 * device. Filesystem data as well as the underlying block
191 * device. Takes the superblock lock.
193 int fsync_bdev(struct block_device *bdev)
195 struct super_block *sb = get_super(bdev);
196 if (sb) {
197 int res = fsync_super(sb);
198 drop_super(sb);
199 return res;
201 return sync_blockdev(bdev);
205 * freeze_bdev -- lock a filesystem and force it into a consistent state
206 * @bdev: blockdevice to lock
208 * This takes the block device bd_mount_mutex to make sure no new mounts
209 * happen on bdev until thaw_bdev() is called.
210 * If a superblock is found on this device, we take the s_umount semaphore
211 * on it to make sure nobody unmounts until the snapshot creation is done.
213 struct super_block *freeze_bdev(struct block_device *bdev)
215 struct super_block *sb;
217 mutex_lock(&bdev->bd_mount_mutex);
218 sb = get_super(bdev);
219 if (sb && !(sb->s_flags & MS_RDONLY)) {
220 sb->s_frozen = SB_FREEZE_WRITE;
221 smp_wmb();
223 __fsync_super(sb);
225 sb->s_frozen = SB_FREEZE_TRANS;
226 smp_wmb();
228 sync_blockdev(sb->s_bdev);
230 if (sb->s_op->write_super_lockfs)
231 sb->s_op->write_super_lockfs(sb);
234 sync_blockdev(bdev);
235 return sb; /* thaw_bdev releases s->s_umount and bd_mount_mutex */
237 EXPORT_SYMBOL(freeze_bdev);
240 * thaw_bdev -- unlock filesystem
241 * @bdev: blockdevice to unlock
242 * @sb: associated superblock
244 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
246 void thaw_bdev(struct block_device *bdev, struct super_block *sb)
248 if (sb) {
249 BUG_ON(sb->s_bdev != bdev);
251 if (sb->s_op->unlockfs)
252 sb->s_op->unlockfs(sb);
253 sb->s_frozen = SB_UNFROZEN;
254 smp_wmb();
255 wake_up(&sb->s_wait_unfrozen);
256 drop_super(sb);
259 mutex_unlock(&bdev->bd_mount_mutex);
261 EXPORT_SYMBOL(thaw_bdev);
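/*
 * Illustrative sketch (not from the original source): a snapshot driver
 * brackets its work with this pair; example_snapshot is a hypothetical
 * name.
 *
 *	static void example_snapshot(struct block_device *bdev)
 *	{
 *		struct super_block *sb;
 *
 *		sb = freeze_bdev(bdev);
 *		... capture a consistent on-disk image here;
 *		    writers are blocked and the fs is synced ...
 *		thaw_bdev(bdev, sb);
 *	}
 *
 * Handing the sb returned by freeze_bdev() back to thaw_bdev() releases
 * the reference taken by get_super() above and unblocks writers.
 */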
264 * sync everything. Start out by waking pdflush, because that writes back
265 * all queues in parallel.
267 static void do_sync(unsigned long wait)
269 wakeup_pdflush(0);
270 sync_inodes(0); /* All mappings, inodes and their blockdevs */
271 DQUOT_SYNC(NULL);
272 sync_supers(); /* Write the superblocks */
273 sync_filesystems(0); /* Start syncing the filesystems */
274 sync_filesystems(wait); /* Waitingly sync the filesystems */
275 sync_inodes(wait); /* Mappings, inodes and blockdevs, again. */
276 if (!wait)
277 printk("Emergency Sync complete\n");
278 if (unlikely(laptop_mode))
279 laptop_sync_completion();
282 asmlinkage long sys_sync(void)
284 do_sync(1);
285 return 0;
288 void emergency_sync(void)
290 pdflush_operation(do_sync, 0);
294 * Generic function to fsync a file.
296 * filp may be NULL if called via the msync of a vma.
299 int file_fsync(struct file *filp, struct dentry *dentry, int datasync)
301 struct inode * inode = dentry->d_inode;
302 struct super_block * sb;
303 int ret, err;
305 /* sync the inode to buffers */
306 ret = write_inode_now(inode, 0);
308 /* sync the superblock to buffers */
309 sb = inode->i_sb;
310 lock_super(sb);
311 if (sb->s_op->write_super)
312 sb->s_op->write_super(sb);
313 unlock_super(sb);
315 /* .. finally sync the buffers to disk */
316 err = sync_blockdev(sb->s_bdev);
317 if (!ret)
318 ret = err;
319 return ret;
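/*
 * Illustrative sketch (not from the original source): simple filesystems
 * can plug file_fsync() straight into their file_operations; names other
 * than file_fsync are hypothetical.
 *
 *	static struct file_operations examplefs_file_ops = {
 *		.read	= generic_file_read,
 *		.write	= generic_file_write,
 *		.fsync	= file_fsync,
 *	};
 */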
322 long do_fsync(struct file *file, int datasync)
324 int ret;
325 int err;
326 struct address_space *mapping = file->f_mapping;
328 if (!file->f_op || !file->f_op->fsync) {
329 /* Why? We can still call filemap_fdatawrite */
330 ret = -EINVAL;
331 goto out;
334 current->flags |= PF_SYNCWRITE;
335 ret = filemap_fdatawrite(mapping);
338 * We need to protect against concurrent writers, which could cause
339 * livelocks in fsync_buffers_list().
341 mutex_lock(&mapping->host->i_mutex);
342 err = file->f_op->fsync(file, file->f_dentry, datasync);
343 if (!ret)
344 ret = err;
345 mutex_unlock(&mapping->host->i_mutex);
346 err = filemap_fdatawait(mapping);
347 if (!ret)
348 ret = err;
349 current->flags &= ~PF_SYNCWRITE;
350 out:
351 return ret;
354 static long __do_fsync(unsigned int fd, int datasync)
356 struct file *file;
357 int ret = -EBADF;
359 file = fget(fd);
360 if (file) {
361 ret = do_fsync(file, datasync);
362 fput(file);
364 return ret;
367 asmlinkage long sys_fsync(unsigned int fd)
369 return __do_fsync(fd, 0);
372 asmlinkage long sys_fdatasync(unsigned int fd)
374 return __do_fsync(fd, 1);
378 * Various filesystems appear to want __find_get_block to be non-blocking.
379 * But it's the page lock which protects the buffers. To get around this,
380 * we get exclusion from try_to_free_buffers with the blockdev mapping's
381 * private_lock.
383 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
384 * may be quite high. This code could TryLock the page, and if that
385 * succeeds, there is no need to take private_lock. (But if
386 * private_lock is contended then so is mapping->tree_lock).
388 static struct buffer_head *
389 __find_get_block_slow(struct block_device *bdev, sector_t block)
391 struct inode *bd_inode = bdev->bd_inode;
392 struct address_space *bd_mapping = bd_inode->i_mapping;
393 struct buffer_head *ret = NULL;
394 pgoff_t index;
395 struct buffer_head *bh;
396 struct buffer_head *head;
397 struct page *page;
398 int all_mapped = 1;
400 index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
401 page = find_get_page(bd_mapping, index);
402 if (!page)
403 goto out;
405 spin_lock(&bd_mapping->private_lock);
406 if (!page_has_buffers(page))
407 goto out_unlock;
408 head = page_buffers(page);
409 bh = head;
410 do {
411 if (bh->b_blocknr == block) {
412 ret = bh;
413 get_bh(bh);
414 goto out_unlock;
416 if (!buffer_mapped(bh))
417 all_mapped = 0;
418 bh = bh->b_this_page;
419 } while (bh != head);
421 /* we might be here because some of the buffers on this page are
422 * not mapped. This is due to various races between
423 * file io on the block device and getblk. It gets dealt with
424 * elsewhere, don't buffer_error if we had some unmapped buffers
426 if (all_mapped) {
427 printk("__find_get_block_slow() failed. "
428 "block=%llu, b_blocknr=%llu\n",
429 (unsigned long long)block, (unsigned long long)bh->b_blocknr);
430 printk("b_state=0x%08lx, b_size=%u\n", bh->b_state, bh->b_size);
431 printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
433 out_unlock:
434 spin_unlock(&bd_mapping->private_lock);
435 page_cache_release(page);
436 out:
437 return ret;
440 /* If invalidate_buffers() will trash dirty buffers, it means some kind
441 of fs corruption is going on. Trashing dirty data always implies losing
442 information that was supposed to be just stored on the physical layer
443 by the user.
445 Thus invalidate_buffers in general usage is not allowed to trash
446 dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
447 be preserved. These buffers are simply skipped.
449 We also skip buffers which are still in use. For example this can
450 happen if a userspace program is reading the block device.
452 NOTE: In the case where the user removed a removable-media disk while
453 there was still dirty data not synced to disk (due to a bug in the device
454 driver or to a user error), not destroying the dirty buffers could
455 generate corruption on the next media inserted as well, thus a parameter
456 is necessary to handle this case in the safest way possible (trying not
457 to corrupt the newly inserted disk with data belonging to the old,
458 now-corrupted one). Also, for the ramdisk the natural thing to do in
459 order to release the ramdisk memory is to destroy dirty buffers.
461 These are two special cases. Normal usage implies that the device driver
462 issues a sync on the device (without waiting for I/O completion) and
463 then an invalidate_buffers call that doesn't trash dirty buffers.
465 For handling cache coherency with the blkdev pagecache, the 'update' case
466 has been introduced. It is needed to re-read from disk any pinned
467 buffer. NOTE: re-reading from disk is destructive so we can do it only
468 when we assume nobody is changing the buffercache under our I/O and when
469 we think the disk contains more recent information than the buffercache.
470 The update == 1 pass marks the buffers we need to update, the update == 2
471 pass does the actual I/O. */
472 void invalidate_bdev(struct block_device *bdev, int destroy_dirty_buffers)
474 invalidate_bh_lrus();
476 * FIXME: what about destroy_dirty_buffers?
477 * We really want to use invalidate_inode_pages2() for
478 * that, but not until that's cleaned up.
480 invalidate_inode_pages(bdev->bd_inode->i_mapping);
484 * Kick pdflush then try to free up some ZONE_NORMAL memory.
486 static void free_more_memory(void)
488 struct zone **zones;
489 pg_data_t *pgdat;
491 wakeup_pdflush(1024);
492 yield();
494 for_each_pgdat(pgdat) {
495 zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
496 if (*zones)
497 try_to_free_pages(zones, GFP_NOFS);
502 * I/O completion handler for block_read_full_page() - pages
503 * which come unlocked at the end of I/O.
505 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
507 unsigned long flags;
508 struct buffer_head *first;
509 struct buffer_head *tmp;
510 struct page *page;
511 int page_uptodate = 1;
513 BUG_ON(!buffer_async_read(bh));
515 page = bh->b_page;
516 if (uptodate) {
517 set_buffer_uptodate(bh);
518 } else {
519 clear_buffer_uptodate(bh);
520 if (printk_ratelimit())
521 buffer_io_error(bh);
522 SetPageError(page);
526 * Be _very_ careful from here on. Bad things can happen if
527 * two buffer heads end IO at almost the same time and both
528 * decide that the page is now completely done.
530 first = page_buffers(page);
531 local_irq_save(flags);
532 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
533 clear_buffer_async_read(bh);
534 unlock_buffer(bh);
535 tmp = bh;
536 do {
537 if (!buffer_uptodate(tmp))
538 page_uptodate = 0;
539 if (buffer_async_read(tmp)) {
540 BUG_ON(!buffer_locked(tmp));
541 goto still_busy;
543 tmp = tmp->b_this_page;
544 } while (tmp != bh);
545 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
546 local_irq_restore(flags);
549 * If none of the buffers had errors and they are all
550 * uptodate then we can set the page uptodate.
552 if (page_uptodate && !PageError(page))
553 SetPageUptodate(page);
554 unlock_page(page);
555 return;
557 still_busy:
558 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
559 local_irq_restore(flags);
560 return;
564 * Completion handler for block_write_full_page() - pages which are unlocked
565 * during I/O, and which have PageWriteback cleared upon I/O completion.
567 void end_buffer_async_write(struct buffer_head *bh, int uptodate)
569 char b[BDEVNAME_SIZE];
570 unsigned long flags;
571 struct buffer_head *first;
572 struct buffer_head *tmp;
573 struct page *page;
575 BUG_ON(!buffer_async_write(bh));
577 page = bh->b_page;
578 if (uptodate) {
579 set_buffer_uptodate(bh);
580 } else {
581 if (printk_ratelimit()) {
582 buffer_io_error(bh);
583 printk(KERN_WARNING "lost page write due to "
584 "I/O error on %s\n",
585 bdevname(bh->b_bdev, b));
587 set_bit(AS_EIO, &page->mapping->flags);
588 clear_buffer_uptodate(bh);
589 SetPageError(page);
592 first = page_buffers(page);
593 local_irq_save(flags);
594 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
596 clear_buffer_async_write(bh);
597 unlock_buffer(bh);
598 tmp = bh->b_this_page;
599 while (tmp != bh) {
600 if (buffer_async_write(tmp)) {
601 BUG_ON(!buffer_locked(tmp));
602 goto still_busy;
604 tmp = tmp->b_this_page;
606 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
607 local_irq_restore(flags);
608 end_page_writeback(page);
609 return;
611 still_busy:
612 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
613 local_irq_restore(flags);
614 return;
618 * If a page's buffers are under async read-in (end_buffer_async_read
619 * completion) then there is a possibility that another thread of
620 * control could lock one of the buffers after it has completed
621 * but while some of the other buffers have not completed. This
622 * locked buffer would confuse end_buffer_async_read() into not unlocking
623 * the page. So the absence of BH_Async_Read tells end_buffer_async_read()
624 * that this buffer is not under async I/O.
626 * The page comes unlocked when it has no locked buffer_async buffers
627 * left.
629 * PageLocked prevents anyone starting new async I/O reads any of
630 * the buffers.
632 * PageWriteback is used to prevent simultaneous writeout of the same
633 * page.
635 * PageLocked prevents anyone from starting writeback of a page which is
636 * under read I/O (PageWriteback is only ever set against a locked page).
638 static void mark_buffer_async_read(struct buffer_head *bh)
640 bh->b_end_io = end_buffer_async_read;
641 set_buffer_async_read(bh);
644 void mark_buffer_async_write(struct buffer_head *bh)
646 bh->b_end_io = end_buffer_async_write;
647 set_buffer_async_write(bh);
649 EXPORT_SYMBOL(mark_buffer_async_write);
653 * fs/buffer.c contains helper functions for buffer-backed address space's
654 * fsync functions. A common requirement for buffer-based filesystems is
655 * that certain data from the backing blockdev needs to be written out for
656 * a successful fsync(). For example, ext2 indirect blocks need to be
657 * written back and waited upon before fsync() returns.
659 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
660 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
661 * management of a list of dependent buffers at ->i_mapping->private_list.
663 * Locking is a little subtle: try_to_free_buffers() will remove buffers
664 * from their controlling inode's queue when they are being freed. But
665 * try_to_free_buffers() will be operating against the *blockdev* mapping
666 * at the time, not against the S_ISREG file which depends on those buffers.
667 * So the locking for private_list is via the private_lock in the address_space
668 * which backs the buffers. Which is different from the address_space
669 * against which the buffers are listed. So for a particular address_space,
670 * mapping->private_lock does *not* protect mapping->private_list! In fact,
671 * mapping->private_list will always be protected by the backing blockdev's
672 * ->private_lock.
674 * Which introduces a requirement: all buffers on an address_space's
675 * ->private_list must be from the same address_space: the blockdev's.
677 * address_spaces which do not place buffers at ->private_list via these
678 * utility functions are free to use private_lock and private_list for
679 * whatever they want. The only requirement is that list_empty(private_list)
680 * be true at clear_inode() time.
682 * FIXME: clear_inode should not call invalidate_inode_buffers(). The
683 * filesystems should do that. invalidate_inode_buffers() should just go
684 * BUG_ON(!list_empty).
686 * FIXME: mark_buffer_dirty_inode() is a data-plane operation. It should
687 * take an address_space, not an inode. And it should be called
688 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
689 * queued up.
691 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
692 * list if it is already on a list. Because if the buffer is on a list,
693 * it *must* already be on the right one. If not, the filesystem is being
694 * silly. This will save a ton of locking. But first we have to ensure
695 * that buffers are taken *off* the old inode's list when they are freed
696 * (presumably in truncate). That requires careful auditing of all
697 * filesystems (do it inside bforget()). It could also be done by bringing
698 * b_inode back.
702 * The buffer's backing address_space's private_lock must be held
704 static inline void __remove_assoc_queue(struct buffer_head *bh)
706 list_del_init(&bh->b_assoc_buffers);
709 int inode_has_buffers(struct inode *inode)
711 return !list_empty(&inode->i_data.private_list);
715 * osync is designed to support O_SYNC io. It waits synchronously for
716 * all already-submitted IO to complete, but does not queue any new
717 * writes to the disk.
719 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
720 * you dirty the buffers, and then use osync_inode_buffers to wait for
721 * completion. Any other dirty buffers which are not yet queued for
722 * write will not be flushed to disk by the osync.
724 static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
726 struct buffer_head *bh;
727 struct list_head *p;
728 int err = 0;
730 spin_lock(lock);
731 repeat:
732 list_for_each_prev(p, list) {
733 bh = BH_ENTRY(p);
734 if (buffer_locked(bh)) {
735 get_bh(bh);
736 spin_unlock(lock);
737 wait_on_buffer(bh);
738 if (!buffer_uptodate(bh))
739 err = -EIO;
740 brelse(bh);
741 spin_lock(lock);
742 goto repeat;
745 spin_unlock(lock);
746 return err;
750 * sync_mapping_buffers - write out and wait upon a mapping's "associated"
751 * buffers
752 * @mapping: the mapping which wants those buffers written
754 * Starts I/O against the buffers at mapping->private_list, and waits upon
755 * that I/O.
757 * Basically, this is a convenience function for fsync().
758 * @mapping is a file or directory which needs those buffers to be written for
759 * a successful fsync().
761 int sync_mapping_buffers(struct address_space *mapping)
763 struct address_space *buffer_mapping = mapping->assoc_mapping;
765 if (buffer_mapping == NULL || list_empty(&mapping->private_list))
766 return 0;
768 return fsync_buffers_list(&buffer_mapping->private_lock,
769 &mapping->private_list);
771 EXPORT_SYMBOL(sync_mapping_buffers);
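/*
 * Illustrative sketch (not from the original source): a buffer-backed
 * filesystem's fsync method typically starts with sync_mapping_buffers();
 * the examplefs names are hypothetical, but ext2's fsync in this kernel
 * generation has essentially this shape.
 *
 *	static int examplefs_fsync(struct file *file, struct dentry *dentry,
 *				   int datasync)
 *	{
 *		struct inode *inode = dentry->d_inode;
 *		int err;
 *
 *		err = sync_mapping_buffers(inode->i_mapping);
 *		... then write the inode itself if it is dirty ...
 *		return err;
 *	}
 */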
774 * Called when we've recently written block `bblock', and it is known that
775 * `bblock' was for a buffer_boundary() buffer. This means that the block at
776 * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's
777 * dirty, schedule it for IO. So that indirects merge nicely with their data.
779 void write_boundary_block(struct block_device *bdev,
780 sector_t bblock, unsigned blocksize)
782 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
783 if (bh) {
784 if (buffer_dirty(bh))
785 ll_rw_block(WRITE, 1, &bh);
786 put_bh(bh);
790 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
792 struct address_space *mapping = inode->i_mapping;
793 struct address_space *buffer_mapping = bh->b_page->mapping;
795 mark_buffer_dirty(bh);
796 if (!mapping->assoc_mapping) {
797 mapping->assoc_mapping = buffer_mapping;
798 } else {
799 if (mapping->assoc_mapping != buffer_mapping)
800 BUG();
802 if (list_empty(&bh->b_assoc_buffers)) {
803 spin_lock(&buffer_mapping->private_lock);
804 list_move_tail(&bh->b_assoc_buffers,
805 &mapping->private_list);
806 spin_unlock(&buffer_mapping->private_lock);
809 EXPORT_SYMBOL(mark_buffer_dirty_inode);
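/*
 * Illustrative sketch (not from the original source): a filesystem that
 * has just modified an indirect block on behalf of a regular file ties
 * the buffer to that file's mapping so a later fsync() finds it:
 *
 *	mark_buffer_dirty_inode(bh, inode);
 *
 * The buffer lands on inode->i_mapping->private_list, and
 * sync_mapping_buffers(inode->i_mapping) above will write and wait on it.
 */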
812 * Add a page to the dirty page list.
814 * It is a sad fact of life that this function is called from several places
815 * deeply under spinlocking. It may not sleep.
817 * If the page has buffers, the uptodate buffers are set dirty, to preserve
818 * dirty-state coherency between the page and the buffers. If the page does
819 * not have buffers then when they are later attached they will all be set
820 * dirty.
822 * The buffers are dirtied before the page is dirtied. There's a small race
823 * window in which a writepage caller may see the page cleanness but not the
824 * buffer dirtiness. That's fine. If this code were to set the page dirty
825 * before the buffers, a concurrent writepage caller could clear the page dirty
826 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
827 * page on the dirty page list.
829 * We use private_lock to lock against try_to_free_buffers while using the
830 * page's buffer list. Also use this to protect against clean buffers being
831 * added to the page after it was set dirty.
833 * FIXME: may need to call ->reservepage here as well. That's rather up to the
834 * address_space though.
836 int __set_page_dirty_buffers(struct page *page)
838 struct address_space * const mapping = page->mapping;
840 spin_lock(&mapping->private_lock);
841 if (page_has_buffers(page)) {
842 struct buffer_head *head = page_buffers(page);
843 struct buffer_head *bh = head;
845 do {
846 set_buffer_dirty(bh);
847 bh = bh->b_this_page;
848 } while (bh != head);
850 spin_unlock(&mapping->private_lock);
852 if (!TestSetPageDirty(page)) {
853 write_lock_irq(&mapping->tree_lock);
854 if (page->mapping) { /* Race with truncate? */
855 if (mapping_cap_account_dirty(mapping))
856 inc_page_state(nr_dirty);
857 radix_tree_tag_set(&mapping->page_tree,
858 page_index(page),
859 PAGECACHE_TAG_DIRTY);
861 write_unlock_irq(&mapping->tree_lock);
862 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
863 return 1;
865 return 0;
867 EXPORT_SYMBOL(__set_page_dirty_buffers);
870 * Write out and wait upon a list of buffers.
872 * We have conflicting pressures: we want to make sure that all
873 * initially dirty buffers get waited on, but that any subsequently
874 * dirtied buffers don't. After all, we don't want fsync to last
875 * forever if somebody is actively writing to the file.
877 * Do this in two main stages: first we copy dirty buffers to a
878 * temporary inode list, queueing the writes as we go. Then we clean
879 * up, waiting for those writes to complete.
881 * During this second stage, any subsequent updates to the file may end
882 * up refiling the buffer on the original inode's dirty list again, so
883 * there is a chance we will end up with a buffer queued for write but
884 * not yet completed on that list. So, as a final cleanup we go through
885 * the osync code to catch these locked, dirty buffers without requeuing
886 * any newly dirty buffers for write.
888 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
890 struct buffer_head *bh;
891 struct list_head tmp;
892 int err = 0, err2;
894 INIT_LIST_HEAD(&tmp);
896 spin_lock(lock);
897 while (!list_empty(list)) {
898 bh = BH_ENTRY(list->next);
899 list_del_init(&bh->b_assoc_buffers);
900 if (buffer_dirty(bh) || buffer_locked(bh)) {
901 list_add(&bh->b_assoc_buffers, &tmp);
902 if (buffer_dirty(bh)) {
903 get_bh(bh);
904 spin_unlock(lock);
906 * Ensure any pending I/O completes so that
907 * ll_rw_block() actually writes the current
908 * contents - it is a noop if I/O is still in
909 * flight on potentially older contents.
911 ll_rw_block(SWRITE, 1, &bh);
912 brelse(bh);
913 spin_lock(lock);
918 while (!list_empty(&tmp)) {
919 bh = BH_ENTRY(tmp.prev);
920 __remove_assoc_queue(bh);
921 get_bh(bh);
922 spin_unlock(lock);
923 wait_on_buffer(bh);
924 if (!buffer_uptodate(bh))
925 err = -EIO;
926 brelse(bh);
927 spin_lock(lock);
930 spin_unlock(lock);
931 err2 = osync_buffers_list(lock, list);
932 if (err)
933 return err;
934 else
935 return err2;
939 * Invalidate any and all dirty buffers on a given inode. We are
940 * probably unmounting the fs, but that doesn't mean we have already
941 * done a sync(). Just drop the buffers from the inode list.
943 * NOTE: we take the inode's blockdev's mapping's private_lock. Which
944 * assumes that all the buffers are against the blockdev. Not true
945 * for reiserfs.
947 void invalidate_inode_buffers(struct inode *inode)
949 if (inode_has_buffers(inode)) {
950 struct address_space *mapping = &inode->i_data;
951 struct list_head *list = &mapping->private_list;
952 struct address_space *buffer_mapping = mapping->assoc_mapping;
954 spin_lock(&buffer_mapping->private_lock);
955 while (!list_empty(list))
956 __remove_assoc_queue(BH_ENTRY(list->next));
957 spin_unlock(&buffer_mapping->private_lock);
962 * Remove any clean buffers from the inode's buffer list. This is called
963 * when we're trying to free the inode itself. Those buffers can pin it.
965 * Returns true if all buffers were removed.
967 int remove_inode_buffers(struct inode *inode)
969 int ret = 1;
971 if (inode_has_buffers(inode)) {
972 struct address_space *mapping = &inode->i_data;
973 struct list_head *list = &mapping->private_list;
974 struct address_space *buffer_mapping = mapping->assoc_mapping;
976 spin_lock(&buffer_mapping->private_lock);
977 while (!list_empty(list)) {
978 struct buffer_head *bh = BH_ENTRY(list->next);
979 if (buffer_dirty(bh)) {
980 ret = 0;
981 break;
983 __remove_assoc_queue(bh);
985 spin_unlock(&buffer_mapping->private_lock);
987 return ret;
991 * Create the appropriate buffers when given a page for data area and
992 * the size of each buffer.. Use the bh->b_this_page linked list to
993 * follow the buffers created. Return NULL if unable to create more
994 * buffers.
996 * The retry flag is used to differentiate async IO (paging, swapping)
997 * which may not fail from ordinary buffer allocations.
999 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
1000 int retry)
1002 struct buffer_head *bh, *head;
1003 long offset;
1005 try_again:
1006 head = NULL;
1007 offset = PAGE_SIZE;
1008 while ((offset -= size) >= 0) {
1009 bh = alloc_buffer_head(GFP_NOFS);
1010 if (!bh)
1011 goto no_grow;
1013 bh->b_bdev = NULL;
1014 bh->b_this_page = head;
1015 bh->b_blocknr = -1;
1016 head = bh;
1018 bh->b_state = 0;
1019 atomic_set(&bh->b_count, 0);
1020 bh->b_private = NULL;
1021 bh->b_size = size;
1023 /* Link the buffer to its page */
1024 set_bh_page(bh, page, offset);
1026 init_buffer(bh, NULL, NULL);
1028 return head;
1030 * In case anything failed, we just free everything we got.
1032 no_grow:
1033 if (head) {
1034 do {
1035 bh = head;
1036 head = head->b_this_page;
1037 free_buffer_head(bh);
1038 } while (head);
1042 * Return failure for non-async IO requests. Async IO requests
1043 * are not allowed to fail, so we have to wait until buffer heads
1044 * become available. But we don't want tasks sleeping with
1045 * partially complete buffers, so all were released above.
1047 if (!retry)
1048 return NULL;
1050 /* We're _really_ low on memory. Now we just
1051 * wait for old buffer heads to become free due to
1052 * finishing IO. Since this is an async request and
1053 * the reserve list is empty, we're sure there are
1054 * async buffer heads in use.
1056 free_more_memory();
1057 goto try_again;
1059 EXPORT_SYMBOL_GPL(alloc_page_buffers);
1061 static inline void
1062 link_dev_buffers(struct page *page, struct buffer_head *head)
1064 struct buffer_head *bh, *tail;
1066 bh = head;
1067 do {
1068 tail = bh;
1069 bh = bh->b_this_page;
1070 } while (bh);
1071 tail->b_this_page = head;
1072 attach_page_buffers(page, head);
1076 * Initialise the state of a blockdev page's buffers.
1078 static void
1079 init_page_buffers(struct page *page, struct block_device *bdev,
1080 sector_t block, int size)
1082 struct buffer_head *head = page_buffers(page);
1083 struct buffer_head *bh = head;
1084 int uptodate = PageUptodate(page);
1086 do {
1087 if (!buffer_mapped(bh)) {
1088 init_buffer(bh, NULL, NULL);
1089 bh->b_bdev = bdev;
1090 bh->b_blocknr = block;
1091 if (uptodate)
1092 set_buffer_uptodate(bh);
1093 set_buffer_mapped(bh);
1095 block++;
1096 bh = bh->b_this_page;
1097 } while (bh != head);
1101 * Create the page-cache page that contains the requested block.
1103 * This is used purely for blockdev mappings.
1105 static struct page *
1106 grow_dev_page(struct block_device *bdev, sector_t block,
1107 pgoff_t index, int size)
1109 struct inode *inode = bdev->bd_inode;
1110 struct page *page;
1111 struct buffer_head *bh;
1113 page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
1114 if (!page)
1115 return NULL;
1117 if (!PageLocked(page))
1118 BUG();
1120 if (page_has_buffers(page)) {
1121 bh = page_buffers(page);
1122 if (bh->b_size == size) {
1123 init_page_buffers(page, bdev, block, size);
1124 return page;
1126 if (!try_to_free_buffers(page))
1127 goto failed;
1131 * Allocate some buffers for this page
1133 bh = alloc_page_buffers(page, size, 0);
1134 if (!bh)
1135 goto failed;
1138 * Link the page to the buffers and initialise them. Take the
1139 * lock to be atomic wrt __find_get_block(), which does not
1140 * run under the page lock.
1142 spin_lock(&inode->i_mapping->private_lock);
1143 link_dev_buffers(page, bh);
1144 init_page_buffers(page, bdev, block, size);
1145 spin_unlock(&inode->i_mapping->private_lock);
1146 return page;
1148 failed:
1149 BUG();
1150 unlock_page(page);
1151 page_cache_release(page);
1152 return NULL;
1156 * Create buffers for the specified block device block's page. If
1157 * that page was dirty, the buffers are set dirty also.
1159 * Except that's a bug. Attaching dirty buffers to a dirty
1160 * blockdev's page can result in filesystem corruption, because
1161 * some of those buffers may be aliases of filesystem data.
1162 * grow_dev_page() will go BUG() if this happens.
1164 static int
1165 grow_buffers(struct block_device *bdev, sector_t block, int size)
1167 struct page *page;
1168 pgoff_t index;
1169 int sizebits;
1171 sizebits = -1;
1172 do {
1173 sizebits++;
1174 } while ((size << sizebits) < PAGE_SIZE);
1176 index = block >> sizebits;
1177 block = index << sizebits;
1179 /* Create a page with the proper size buffers.. */
1180 page = grow_dev_page(bdev, block, index, size);
1181 if (!page)
1182 return 0;
1183 unlock_page(page);
1184 page_cache_release(page);
1185 return 1;
1188 static struct buffer_head *
1189 __getblk_slow(struct block_device *bdev, sector_t block, int size)
1191 /* Size must be multiple of hard sectorsize */
1192 if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
1193 (size < 512 || size > PAGE_SIZE))) {
1194 printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1195 size);
1196 printk(KERN_ERR "hardsect size: %d\n",
1197 bdev_hardsect_size(bdev));
1199 dump_stack();
1200 return NULL;
1203 for (;;) {
1204 struct buffer_head * bh;
1206 bh = __find_get_block(bdev, block, size);
1207 if (bh)
1208 return bh;
1210 if (!grow_buffers(bdev, block, size))
1211 free_more_memory();
1216 * The relationship between dirty buffers and dirty pages:
1218 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1219 * the page is tagged dirty in its radix tree.
1221 * At all times, the dirtiness of the buffers represents the dirtiness of
1222 * subsections of the page. If the page has buffers, the page dirty bit is
1223 * merely a hint about the true dirty state.
1225 * When a page is set dirty in its entirety, all its buffers are marked dirty
1226 * (if the page has buffers).
1228 * When a buffer is marked dirty, its page is dirtied, but the page's other
1229 * buffers are not.
1231 * Also. When blockdev buffers are explicitly read with bread(), they
1232 * individually become uptodate. But their backing page remains not
1233 * uptodate - even if all of its buffers are uptodate. A subsequent
1234 * block_read_full_page() against that page will discover all the uptodate
1235 * buffers, will set the page uptodate and will perform no I/O.
1239 * mark_buffer_dirty - mark a buffer_head as needing writeout
1240 * @bh: the buffer_head to mark dirty
1242 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1243 * backing page dirty, then tag the page as dirty in its address_space's radix
1244 * tree and then attach the address_space's inode to its superblock's dirty
1245 * inode list.
1247 * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
1248 * mapping->tree_lock and the global inode_lock.
1250 void fastcall mark_buffer_dirty(struct buffer_head *bh)
1252 if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
1253 __set_page_dirty_nobuffers(bh->b_page);
1257 * Decrement a buffer_head's reference count. If all buffers against a page
1258 * have zero reference count, are clean and unlocked, and if the page is clean
1259 * and unlocked then try_to_free_buffers() may strip the buffers from the page
1260 * in preparation for freeing it (sometimes, rarely, buffers are removed from
1261 * a page but it ends up not being freed, and buffers may later be reattached).
1263 void __brelse(struct buffer_head * buf)
1265 if (atomic_read(&buf->b_count)) {
1266 put_bh(buf);
1267 return;
1269 printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1270 WARN_ON(1);
1274 * bforget() is like brelse(), except it discards any
1275 * potentially dirty data.
1277 void __bforget(struct buffer_head *bh)
1279 clear_buffer_dirty(bh);
1280 if (!list_empty(&bh->b_assoc_buffers)) {
1281 struct address_space *buffer_mapping = bh->b_page->mapping;
1283 spin_lock(&buffer_mapping->private_lock);
1284 list_del_init(&bh->b_assoc_buffers);
1285 spin_unlock(&buffer_mapping->private_lock);
1287 __brelse(bh);
1290 static struct buffer_head *__bread_slow(struct buffer_head *bh)
1292 lock_buffer(bh);
1293 if (buffer_uptodate(bh)) {
1294 unlock_buffer(bh);
1295 return bh;
1296 } else {
1297 get_bh(bh);
1298 bh->b_end_io = end_buffer_read_sync;
1299 submit_bh(READ, bh);
1300 wait_on_buffer(bh);
1301 if (buffer_uptodate(bh))
1302 return bh;
1304 brelse(bh);
1305 return NULL;
1309 * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block().
1310 * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their
1311 * refcount elevated by one when they're in an LRU. A buffer can only appear
1312 * once in a particular CPU's LRU. A single buffer can be present in multiple
1313 * CPU's LRUs at the same time.
1315 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1316 * sb_find_get_block().
1318 * The LRUs themselves only need locking against invalidate_bh_lrus. We use
1319 * a local interrupt disable for that.
1322 #define BH_LRU_SIZE 8
1324 struct bh_lru {
1325 struct buffer_head *bhs[BH_LRU_SIZE];
1328 static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1330 #ifdef CONFIG_SMP
1331 #define bh_lru_lock() local_irq_disable()
1332 #define bh_lru_unlock() local_irq_enable()
1333 #else
1334 #define bh_lru_lock() preempt_disable()
1335 #define bh_lru_unlock() preempt_enable()
1336 #endif
1338 static inline void check_irqs_on(void)
1340 #ifdef irqs_disabled
1341 BUG_ON(irqs_disabled());
1342 #endif
1346 * The LRU management algorithm is dopey-but-simple. Sorry.
1348 static void bh_lru_install(struct buffer_head *bh)
1350 struct buffer_head *evictee = NULL;
1351 struct bh_lru *lru;
1353 check_irqs_on();
1354 bh_lru_lock();
1355 lru = &__get_cpu_var(bh_lrus);
1356 if (lru->bhs[0] != bh) {
1357 struct buffer_head *bhs[BH_LRU_SIZE];
1358 int in;
1359 int out = 0;
1361 get_bh(bh);
1362 bhs[out++] = bh;
1363 for (in = 0; in < BH_LRU_SIZE; in++) {
1364 struct buffer_head *bh2 = lru->bhs[in];
1366 if (bh2 == bh) {
1367 __brelse(bh2);
1368 } else {
1369 if (out >= BH_LRU_SIZE) {
1370 BUG_ON(evictee != NULL);
1371 evictee = bh2;
1372 } else {
1373 bhs[out++] = bh2;
1377 while (out < BH_LRU_SIZE)
1378 bhs[out++] = NULL;
1379 memcpy(lru->bhs, bhs, sizeof(bhs));
1381 bh_lru_unlock();
1383 if (evictee)
1384 __brelse(evictee);
1388 * Look up the bh in this cpu's LRU. If it's there, move it to the head.
1390 static struct buffer_head *
1391 lookup_bh_lru(struct block_device *bdev, sector_t block, int size)
1393 struct buffer_head *ret = NULL;
1394 struct bh_lru *lru;
1395 int i;
1397 check_irqs_on();
1398 bh_lru_lock();
1399 lru = &__get_cpu_var(bh_lrus);
1400 for (i = 0; i < BH_LRU_SIZE; i++) {
1401 struct buffer_head *bh = lru->bhs[i];
1403 if (bh && bh->b_bdev == bdev &&
1404 bh->b_blocknr == block && bh->b_size == size) {
1405 if (i) {
1406 while (i) {
1407 lru->bhs[i] = lru->bhs[i - 1];
1408 i--;
1410 lru->bhs[0] = bh;
1412 get_bh(bh);
1413 ret = bh;
1414 break;
1417 bh_lru_unlock();
1418 return ret;
1422 * Perform a pagecache lookup for the matching buffer. If it's there, refresh
1423 * it in the LRU and mark it as accessed. If it is not present then return
1424 * NULL
1426 struct buffer_head *
1427 __find_get_block(struct block_device *bdev, sector_t block, int size)
1429 struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1431 if (bh == NULL) {
1432 bh = __find_get_block_slow(bdev, block);
1433 if (bh)
1434 bh_lru_install(bh);
1436 if (bh)
1437 touch_buffer(bh);
1438 return bh;
1440 EXPORT_SYMBOL(__find_get_block);
1443 * __getblk will locate (and, if necessary, create) the buffer_head
1444 * which corresponds to the passed block_device, block and size. The
1445 * returned buffer has its reference count incremented.
1447 * __getblk() cannot fail - it just keeps trying. If you pass it an
1448 * illegal block number, __getblk() will happily return a buffer_head
1449 * which represents the non-existent block. Very weird.
1451 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1452 * attempt is failing. FIXME, perhaps?
1454 struct buffer_head *
1455 __getblk(struct block_device *bdev, sector_t block, int size)
1457 struct buffer_head *bh = __find_get_block(bdev, block, size);
1459 might_sleep();
1460 if (bh == NULL)
1461 bh = __getblk_slow(bdev, block, size);
1462 return bh;
1464 EXPORT_SYMBOL(__getblk);
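/*
 * Illustrative sketch (not from the original source): the usual pattern
 * for overwriting a metadata block without reading it first, using only
 * functions from this file. Per the comment above, __getblk() cannot
 * return NULL, so no error check is shown.
 *
 *	struct buffer_head *bh = __getblk(bdev, block, blocksize);
 *
 *	lock_buffer(bh);
 *	memset(bh->b_data, 0, bh->b_size);
 *	set_buffer_uptodate(bh);
 *	unlock_buffer(bh);
 *	mark_buffer_dirty(bh);
 *	brelse(bh);
 */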
1467 * Do async read-ahead on a buffer..
1469 void __breadahead(struct block_device *bdev, sector_t block, int size)
1471 struct buffer_head *bh = __getblk(bdev, block, size);
1472 if (likely(bh)) {
1473 ll_rw_block(READA, 1, &bh);
1474 brelse(bh);
1477 EXPORT_SYMBOL(__breadahead);
1480 * __bread() - reads a specified block and returns the bh
1481 * @bdev: the block_device to read from
1482 * @block: number of block
1483 * @size: size (in bytes) to read
1485 * Reads a specified block, and returns buffer head that contains it.
1486 * It returns NULL if the block was unreadable.
1488 struct buffer_head *
1489 __bread(struct block_device *bdev, sector_t block, int size)
1491 struct buffer_head *bh = __getblk(bdev, block, size);
1493 if (likely(bh) && !buffer_uptodate(bh))
1494 bh = __bread_slow(bh);
1495 return bh;
1497 EXPORT_SYMBOL(__bread);
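/*
 * Illustrative sketch (not from the original source): reading a block
 * synchronously, with a readahead hint for its neighbour:
 *
 *	struct buffer_head *bh;
 *
 *	__breadahead(bdev, block + 1, blocksize);
 *	bh = __bread(bdev, block, blocksize);
 *	if (!bh)
 *		return -EIO;
 *	... use bh->b_data ...
 *	brelse(bh);
 *
 * sb_bread()/sb_getblk() are the usual filesystem-facing wrappers, which
 * fill in the superblock's bdev and blocksize.
 */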
1500 * invalidate_bh_lrus() is called rarely - but not only at unmount.
1501 * This doesn't race because it runs in each cpu either in irq
1502 * or with preempt disabled.
1504 static void invalidate_bh_lru(void *arg)
1506 struct bh_lru *b = &get_cpu_var(bh_lrus);
1507 int i;
1509 for (i = 0; i < BH_LRU_SIZE; i++) {
1510 brelse(b->bhs[i]);
1511 b->bhs[i] = NULL;
1513 put_cpu_var(bh_lrus);
1516 static void invalidate_bh_lrus(void)
1518 on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
1521 void set_bh_page(struct buffer_head *bh,
1522 struct page *page, unsigned long offset)
1524 bh->b_page = page;
1525 if (offset >= PAGE_SIZE)
1526 BUG();
1527 if (PageHighMem(page))
1529 * This catches illegal uses and preserves the offset:
1531 bh->b_data = (char *)(0 + offset);
1532 else
1533 bh->b_data = page_address(page) + offset;
1535 EXPORT_SYMBOL(set_bh_page);
1538 * Called when truncating a buffer on a page completely.
1540 static void discard_buffer(struct buffer_head * bh)
1542 lock_buffer(bh);
1543 clear_buffer_dirty(bh);
1544 bh->b_bdev = NULL;
1545 clear_buffer_mapped(bh);
1546 clear_buffer_req(bh);
1547 clear_buffer_new(bh);
1548 clear_buffer_delay(bh);
1549 unlock_buffer(bh);
1553 * try_to_release_page() - release old fs-specific metadata on a page
1555 * @page: the page which the kernel is trying to free
1556 * @gfp_mask: memory allocation flags (and I/O mode)
1558 * The address_space is to try to release any data against the page
1559 * (presumably at page->private). If the release was successful, return `1'.
1560 * Otherwise return zero.
1562 * The @gfp_mask argument specifies whether I/O may be performed to release
1563 * this page (__GFP_IO), and whether the call may block (__GFP_WAIT).
1565 * NOTE: @gfp_mask may go away, and this function may become non-blocking.
1567 int try_to_release_page(struct page *page, gfp_t gfp_mask)
1569 struct address_space * const mapping = page->mapping;
1571 BUG_ON(!PageLocked(page));
1572 if (PageWriteback(page))
1573 return 0;
1575 if (mapping && mapping->a_ops->releasepage)
1576 return mapping->a_ops->releasepage(page, gfp_mask);
1577 return try_to_free_buffers(page);
1579 EXPORT_SYMBOL(try_to_release_page);
1582 * block_invalidatepage - invalidate part or all of a buffer-backed page
1584 * @page: the page which is affected
1585 * @offset: the index of the truncation point
1587 * block_invalidatepage() is called when all or part of the page has become
1588 * invalidated by a truncate operation.
1590 * block_invalidatepage() does not have to release all buffers, but it must
1591 * ensure that no dirty buffer is left outside @offset and that no I/O
1592 * is underway against any of the blocks which are outside the truncation
1593 * point. Because the caller is about to free (and possibly reuse) those
1594 * blocks on-disk.
1596 int block_invalidatepage(struct page *page, unsigned long offset)
1598 struct buffer_head *head, *bh, *next;
1599 unsigned int curr_off = 0;
1600 int ret = 1;
1602 BUG_ON(!PageLocked(page));
1603 if (!page_has_buffers(page))
1604 goto out;
1606 head = page_buffers(page);
1607 bh = head;
1608 do {
1609 unsigned int next_off = curr_off + bh->b_size;
1610 next = bh->b_this_page;
1613 * is this block fully invalidated?
1615 if (offset <= curr_off)
1616 discard_buffer(bh);
1617 curr_off = next_off;
1618 bh = next;
1619 } while (bh != head);
1622 * We release buffers only if the entire page is being invalidated.
1623 * The get_block cached value has been unconditionally invalidated,
1624 * so real IO is not possible anymore.
1626 if (offset == 0)
1627 ret = try_to_release_page(page, 0);
1628 out:
1629 return ret;
1631 EXPORT_SYMBOL(block_invalidatepage);
1633 int do_invalidatepage(struct page *page, unsigned long offset)
1635 int (*invalidatepage)(struct page *, unsigned long);
1636 invalidatepage = page->mapping->a_ops->invalidatepage;
1637 if (invalidatepage == NULL)
1638 invalidatepage = block_invalidatepage;
1639 return (*invalidatepage)(page, offset);
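/*
 * Illustrative sketch (not from the original source): most buffer-backed
 * filesystems leave ->invalidatepage NULL and let do_invalidatepage()
 * fall back to block_invalidatepage() above; a journalling filesystem
 * installs its own hook instead. The examplefs names are hypothetical.
 *
 *	static struct address_space_operations examplefs_aops = {
 *		.readpage	= examplefs_readpage,
 *		.writepage	= examplefs_writepage,
 *		.invalidatepage	= examplefs_invalidatepage,
 *	};
 */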
1643 * We attach and possibly dirty the buffers atomically wrt
1644 * __set_page_dirty_buffers() via private_lock. try_to_free_buffers
1645 * is already excluded via the page lock.
1647 void create_empty_buffers(struct page *page,
1648 unsigned long blocksize, unsigned long b_state)
1650 struct buffer_head *bh, *head, *tail;
1652 head = alloc_page_buffers(page, blocksize, 1);
1653 bh = head;
1654 do {
1655 bh->b_state |= b_state;
1656 tail = bh;
1657 bh = bh->b_this_page;
1658 } while (bh);
1659 tail->b_this_page = head;
1661 spin_lock(&page->mapping->private_lock);
1662 if (PageUptodate(page) || PageDirty(page)) {
1663 bh = head;
1664 do {
1665 if (PageDirty(page))
1666 set_buffer_dirty(bh);
1667 if (PageUptodate(page))
1668 set_buffer_uptodate(bh);
1669 bh = bh->b_this_page;
1670 } while (bh != head);
1672 attach_page_buffers(page, head);
1673 spin_unlock(&page->mapping->private_lock);
1675 EXPORT_SYMBOL(create_empty_buffers);
1678 * We are taking a block for data and we don't want any output from any
1679 * buffer-cache aliases starting from the return of this function and
1680 * until the moment when something explicitly marks the buffer
1681 * dirty (hopefully that will not happen until we free that block ;-)
1682 * We don't even need to mark it not-uptodate - nobody can expect
1683 * anything from a newly allocated buffer anyway. We used to use
1684 * unmap_buffer() for such invalidation, but that was wrong. We definitely
1685 * don't want to mark the alias unmapped, for example - it would confuse
1686 * anyone who might pick it with bread() afterwards...
1688 * Also.. Note that bforget() doesn't lock the buffer. So there can
1689 * be writeout I/O going on against recently-freed buffers. We don't
1690 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1691 * only if we really need to. That happens here.
1693 void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1695 struct buffer_head *old_bh;
1697 might_sleep();
1699 old_bh = __find_get_block_slow(bdev, block);
1700 if (old_bh) {
1701 clear_buffer_dirty(old_bh);
1702 wait_on_buffer(old_bh);
1703 clear_buffer_req(old_bh);
1704 __brelse(old_bh);
1707 EXPORT_SYMBOL(unmap_underlying_metadata);
1710 * NOTE! All mapped/uptodate combinations are valid:
1712 * Mapped Uptodate Meaning
1714 * No No "unknown" - must do get_block()
1715 * No Yes "hole" - zero-filled
1716 * Yes No "allocated" - allocated on disk, not read in
1717 * Yes Yes "valid" - allocated and up-to-date in memory.
1719 * "Dirty" is valid only with the last case (mapped+uptodate).
1723 * While block_write_full_page is writing back the dirty buffers under
1724 * the page lock, whoever dirtied the buffers may decide to clean them
1725 * again at any time. We handle that by only looking at the buffer
1726 * state inside lock_buffer().
1728 * If block_write_full_page() is called for regular writeback
1729 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1730 * locked buffer. This can only happen if someone has written the buffer
1731 * directly, with submit_bh(). At the address_space level PageWriteback
1732 * prevents this contention from occurring.
1734 static int __block_write_full_page(struct inode *inode, struct page *page,
1735 get_block_t *get_block, struct writeback_control *wbc)
1737 int err;
1738 sector_t block;
1739 sector_t last_block;
1740 struct buffer_head *bh, *head;
1741 int nr_underway = 0;
1743 BUG_ON(!PageLocked(page));
1745 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1747 if (!page_has_buffers(page)) {
1748 create_empty_buffers(page, 1 << inode->i_blkbits,
1749 (1 << BH_Dirty)|(1 << BH_Uptodate));
1753 * Be very careful. We have no exclusion from __set_page_dirty_buffers
1754 * here, and the (potentially unmapped) buffers may become dirty at
1755 * any time. If a buffer becomes dirty here after we've inspected it
1756 * then we just miss that fact, and the page stays dirty.
1758 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1759 * handle that here by just cleaning them.
1762 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1763 head = page_buffers(page);
1764 bh = head;
1767 * Get all the dirty buffers mapped to disk addresses and
1768 * handle any aliases from the underlying blockdev's mapping.
1770 do {
1771 if (block > last_block) {
1773 * mapped buffers outside i_size will occur, because
1774 * this page can be outside i_size when there is a
1775 * truncate in progress.
1778 * The buffer was zeroed by block_write_full_page()
1780 clear_buffer_dirty(bh);
1781 set_buffer_uptodate(bh);
1782 } else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
1783 err = get_block(inode, block, bh, 1);
1784 if (err)
1785 goto recover;
1786 if (buffer_new(bh)) {
1787 /* blockdev mappings never come here */
1788 clear_buffer_new(bh);
1789 unmap_underlying_metadata(bh->b_bdev,
1790 bh->b_blocknr);
1793 bh = bh->b_this_page;
1794 block++;
1795 } while (bh != head);
1797 do {
1798 if (!buffer_mapped(bh))
1799 continue;
1801 * If it's a fully non-blocking write attempt and we cannot
1802 * lock the buffer then redirty the page. Note that this can
1803 * potentially cause a busy-wait loop from pdflush and kswapd
1804 * activity, but those code paths have their own higher-level
1805 * throttling.
1807 if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1808 lock_buffer(bh);
1809 } else if (test_set_buffer_locked(bh)) {
1810 redirty_page_for_writepage(wbc, page);
1811 continue;
1813 if (test_clear_buffer_dirty(bh)) {
1814 mark_buffer_async_write(bh);
1815 } else {
1816 unlock_buffer(bh);
1818 } while ((bh = bh->b_this_page) != head);
1821 * The page and its buffers are protected by PageWriteback(), so we can
1822 * drop the bh refcounts early.
1824 BUG_ON(PageWriteback(page));
1825 set_page_writeback(page);
1827 do {
1828 struct buffer_head *next = bh->b_this_page;
1829 if (buffer_async_write(bh)) {
1830 submit_bh(WRITE, bh);
1831 nr_underway++;
1833 bh = next;
1834 } while (bh != head);
1835 unlock_page(page);
1837 err = 0;
1838 done:
1839 if (nr_underway == 0) {
1841 * The page was marked dirty, but the buffers were
1842 * clean. Someone wrote them back by hand with
1843 * ll_rw_block/submit_bh. A rare case.
1845 int uptodate = 1;
1846 do {
1847 if (!buffer_uptodate(bh)) {
1848 uptodate = 0;
1849 break;
1851 bh = bh->b_this_page;
1852 } while (bh != head);
1853 if (uptodate)
1854 SetPageUptodate(page);
1855 end_page_writeback(page);
1857 * The page and buffer_heads can be released at any time from
1858 * here on.
1860 wbc->pages_skipped++; /* We didn't write this page */
1862 return err;
1864 recover:
1866 * ENOSPC, or some other error. We may already have added some
1867 * blocks to the file, so we need to write these out to avoid
1868 * exposing stale data.
1869 * The page is currently locked and not marked for writeback
1871 bh = head;
1872 /* Recovery: lock and submit the mapped buffers */
1873 do {
1874 if (buffer_mapped(bh) && buffer_dirty(bh)) {
1875 lock_buffer(bh);
1876 mark_buffer_async_write(bh);
1877 } else {
1879 * The buffer may have been set dirty during
1880 * attachment to a dirty page.
1882 clear_buffer_dirty(bh);
1884 } while ((bh = bh->b_this_page) != head);
1885 SetPageError(page);
1886 BUG_ON(PageWriteback(page));
1887 set_page_writeback(page);
1888 unlock_page(page);
1889 do {
1890 struct buffer_head *next = bh->b_this_page;
1891 if (buffer_async_write(bh)) {
1892 clear_buffer_dirty(bh);
1893 submit_bh(WRITE, bh);
1894 nr_underway++;
1896 bh = next;
1897 } while (bh != head);
1898 goto done;
1901 static int __block_prepare_write(struct inode *inode, struct page *page,
1902 unsigned from, unsigned to, get_block_t *get_block)
1904 unsigned block_start, block_end;
1905 sector_t block;
1906 int err = 0;
1907 unsigned blocksize, bbits;
1908 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1910 BUG_ON(!PageLocked(page));
1911 BUG_ON(from > PAGE_CACHE_SIZE);
1912 BUG_ON(to > PAGE_CACHE_SIZE);
1913 BUG_ON(from > to);
1915 blocksize = 1 << inode->i_blkbits;
1916 if (!page_has_buffers(page))
1917 create_empty_buffers(page, blocksize, 0);
1918 head = page_buffers(page);
1920 bbits = inode->i_blkbits;
1921 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1923 for(bh = head, block_start = 0; bh != head || !block_start;
1924 block++, block_start=block_end, bh = bh->b_this_page) {
1925 block_end = block_start + blocksize;
1926 if (block_end <= from || block_start >= to) {
1927 if (PageUptodate(page)) {
1928 if (!buffer_uptodate(bh))
1929 set_buffer_uptodate(bh);
1931 continue;
1933 if (buffer_new(bh))
1934 clear_buffer_new(bh);
1935 if (!buffer_mapped(bh)) {
1936 err = get_block(inode, block, bh, 1);
1937 if (err)
1938 break;
1939 if (buffer_new(bh)) {
1940 unmap_underlying_metadata(bh->b_bdev,
1941 bh->b_blocknr);
1942 if (PageUptodate(page)) {
1943 set_buffer_uptodate(bh);
1944 continue;
1946 if (block_end > to || block_start < from) {
1947 void *kaddr;
1949 kaddr = kmap_atomic(page, KM_USER0);
1950 if (block_end > to)
1951 memset(kaddr+to, 0,
1952 block_end-to);
1953 if (block_start < from)
1954 memset(kaddr+block_start,
1955 0, from-block_start);
1956 flush_dcache_page(page);
1957 kunmap_atomic(kaddr, KM_USER0);
1959 continue;
1962 if (PageUptodate(page)) {
1963 if (!buffer_uptodate(bh))
1964 set_buffer_uptodate(bh);
1965 continue;
1967 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1968 (block_start < from || block_end > to)) {
1969 ll_rw_block(READ, 1, &bh);
1970 *wait_bh++=bh;
1974 * If we issued read requests - let them complete.
1976 while(wait_bh > wait) {
1977 wait_on_buffer(*--wait_bh);
1978 if (!buffer_uptodate(*wait_bh))
1979 err = -EIO;
1981 if (!err) {
1982 bh = head;
1983 do {
1984 if (buffer_new(bh))
1985 clear_buffer_new(bh);
1986 } while ((bh = bh->b_this_page) != head);
1987 return 0;
1989 /* Error case: */
1991 * Zero out any newly allocated blocks to avoid exposing stale
1992 * data. If BH_New is set, we know that the block was newly
1993 * allocated in the above loop.
1995 bh = head;
1996 block_start = 0;
1997 do {
1998 block_end = block_start+blocksize;
1999 if (block_end <= from)
2000 goto next_bh;
2001 if (block_start >= to)
2002 break;
2003 if (buffer_new(bh)) {
2004 void *kaddr;
2006 clear_buffer_new(bh);
2007 kaddr = kmap_atomic(page, KM_USER0);
2008 memset(kaddr+block_start, 0, bh->b_size);
2009 kunmap_atomic(kaddr, KM_USER0);
2010 set_buffer_uptodate(bh);
2011 mark_buffer_dirty(bh);
2012 }
2013 next_bh:
2014 block_start = block_end;
2015 bh = bh->b_this_page;
2016 } while (bh != head);
2017 return err;
2018 }
2020 static int __block_commit_write(struct inode *inode, struct page *page,
2021 unsigned from, unsigned to)
2022 {
2023 unsigned block_start, block_end;
2024 int partial = 0;
2025 unsigned blocksize;
2026 struct buffer_head *bh, *head;
2028 blocksize = 1 << inode->i_blkbits;
2030 for(bh = head = page_buffers(page), block_start = 0;
2031 bh != head || !block_start;
2032 block_start=block_end, bh = bh->b_this_page) {
2033 block_end = block_start + blocksize;
2034 if (block_end <= from || block_start >= to) {
2035 if (!buffer_uptodate(bh))
2036 partial = 1;
2037 } else {
2038 set_buffer_uptodate(bh);
2039 mark_buffer_dirty(bh);
2040 }
2041 }
2043 /*
2044 * If this is a partial write which happened to make all buffers
2045 * uptodate then we can optimize away a bogus readpage() for
2046 * the next read(). Here we 'discover' whether the page went
2047 * uptodate as a result of this (potentially partial) write.
2048 */
2049 if (!partial)
2050 SetPageUptodate(page);
2051 return 0;
2052 }
2054 /*
2055 * Generic "read page" function for block devices that have the normal
2056 * get_block functionality. This is most of the block device filesystems.
2057 * Reads the page asynchronously --- the unlock_buffer() and
2058 * set/clear_buffer_uptodate() functions propagate buffer state into the
2059 * page struct once IO has completed.
2060 */
2061 int block_read_full_page(struct page *page, get_block_t *get_block)
2062 {
2063 struct inode *inode = page->mapping->host;
2064 sector_t iblock, lblock;
2065 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2066 unsigned int blocksize;
2067 int nr, i;
2068 int fully_mapped = 1;
2070 BUG_ON(!PageLocked(page));
2071 blocksize = 1 << inode->i_blkbits;
2072 if (!page_has_buffers(page))
2073 create_empty_buffers(page, blocksize, 0);
2074 head = page_buffers(page);
2076 iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2077 lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2078 bh = head;
2079 nr = 0;
2080 i = 0;
2082 do {
2083 if (buffer_uptodate(bh))
2084 continue;
2086 if (!buffer_mapped(bh)) {
2087 int err = 0;
2089 fully_mapped = 0;
2090 if (iblock < lblock) {
2091 err = get_block(inode, iblock, bh, 0);
2092 if (err)
2093 SetPageError(page);
2094 }
2095 if (!buffer_mapped(bh)) {
2096 void *kaddr = kmap_atomic(page, KM_USER0);
2097 memset(kaddr + i * blocksize, 0, blocksize);
2098 flush_dcache_page(page);
2099 kunmap_atomic(kaddr, KM_USER0);
2100 if (!err)
2101 set_buffer_uptodate(bh);
2102 continue;
2103 }
2104 /*
2105 * get_block() might have updated the buffer
2106 * synchronously
2107 */
2108 if (buffer_uptodate(bh))
2109 continue;
2110 }
2111 arr[nr++] = bh;
2112 } while (i++, iblock++, (bh = bh->b_this_page) != head);
2114 if (fully_mapped)
2115 SetPageMappedToDisk(page);
2117 if (!nr) {
2118 /*
2119 * All buffers are uptodate - we can set the page uptodate
2120 * as well. But not if get_block() returned an error.
2121 */
2122 if (!PageError(page))
2123 SetPageUptodate(page);
2124 unlock_page(page);
2125 return 0;
2126 }
2128 /* Stage two: lock the buffers */
2129 for (i = 0; i < nr; i++) {
2130 bh = arr[i];
2131 lock_buffer(bh);
2132 mark_buffer_async_read(bh);
2133 }
2135 /*
2136 * Stage 3: start the IO. Check for uptodateness
2137 * inside the buffer lock in case another process reading
2138 * the underlying blockdev brought it uptodate (the sct fix).
2139 */
2140 for (i = 0; i < nr; i++) {
2141 bh = arr[i];
2142 if (buffer_uptodate(bh))
2143 end_buffer_async_read(bh, 1);
2144 else
2145 submit_bh(READ, bh);
2146 }
2147 return 0;
2148 }
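/*
 * Illustrative sketch, not part of the original file: a filesystem
 * typically uses block_read_full_page() as the body of its ->readpage
 * method, passing its own block-mapping routine. "myfs_get_block" is a
 * hypothetical helper standing in for that routine.
 */
#if 0	/* example only */
static int myfs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, myfs_get_block);
}
#endif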
2150 /* utility function for filesystems that need to do work on expanding
2151 * truncates. Uses prepare/commit_write to allow the filesystem to
2152 * deal with the hole.
2153 */
2154 static int __generic_cont_expand(struct inode *inode, loff_t size,
2155 pgoff_t index, unsigned int offset)
2156 {
2157 struct address_space *mapping = inode->i_mapping;
2158 struct page *page;
2159 unsigned long limit;
2160 int err;
2162 err = -EFBIG;
2163 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
2164 if (limit != RLIM_INFINITY && size > (loff_t)limit) {
2165 send_sig(SIGXFSZ, current, 0);
2166 goto out;
2167 }
2168 if (size > inode->i_sb->s_maxbytes)
2169 goto out;
2171 err = -ENOMEM;
2172 page = grab_cache_page(mapping, index);
2173 if (!page)
2174 goto out;
2175 err = mapping->a_ops->prepare_write(NULL, page, offset, offset);
2176 if (err) {
2177 /*
2178 * ->prepare_write() may have instantiated a few blocks
2179 * outside i_size. Trim these off again.
2180 */
2181 unlock_page(page);
2182 page_cache_release(page);
2183 vmtruncate(inode, inode->i_size);
2184 goto out;
2185 }
2187 err = mapping->a_ops->commit_write(NULL, page, offset, offset);
2189 unlock_page(page);
2190 page_cache_release(page);
2191 if (err > 0)
2192 err = 0;
2193 out:
2194 return err;
2195 }
2197 int generic_cont_expand(struct inode *inode, loff_t size)
2198 {
2199 pgoff_t index;
2200 unsigned int offset;
2202 offset = (size & (PAGE_CACHE_SIZE - 1)); /* Within page */
2204 /* ugh. in prepare/commit_write, if from==to==start of block, we
2205 ** skip the prepare. make sure we never send an offset for the start
2206 ** of a block
2207 */
2208 if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
2209 /* caller must handle this extra byte. */
2210 offset++;
2211 }
2212 index = size >> PAGE_CACHE_SHIFT;
2214 return __generic_cont_expand(inode, size, index, offset);
2215 }
2217 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2218 {
2219 loff_t pos = size - 1;
2220 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
2221 unsigned int offset = (pos & (PAGE_CACHE_SIZE - 1)) + 1;
2223 /* prepare/commit_write can handle even if from==to==start of block. */
2224 return __generic_cont_expand(inode, size, index, offset);
2225 }
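/*
 * Illustrative sketch, not part of the original file: a filesystem that
 * cannot represent holes might extend a file this way when a size-changing
 * setattr comes in. "myfs_setattr_size" is a hypothetical wrapper.
 */
#if 0	/* example only */
static int myfs_setattr_size(struct inode *inode, loff_t new_size)
{
	if (new_size > inode->i_size)
		return generic_cont_expand_simple(inode, new_size);
	return 0;
}
#endif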
2227 /*
2228 * For moronic filesystems that do not allow holes in files.
2229 * We may have to extend the file.
2230 */
2232 int cont_prepare_write(struct page *page, unsigned offset,
2233 unsigned to, get_block_t *get_block, loff_t *bytes)
2234 {
2235 struct address_space *mapping = page->mapping;
2236 struct inode *inode = mapping->host;
2237 struct page *new_page;
2238 pgoff_t pgpos;
2239 long status;
2240 unsigned zerofrom;
2241 unsigned blocksize = 1 << inode->i_blkbits;
2242 void *kaddr;
2244 while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) {
2245 status = -ENOMEM;
2246 new_page = grab_cache_page(mapping, pgpos);
2247 if (!new_page)
2248 goto out;
2249 /* we might sleep */
2250 if (*bytes>>PAGE_CACHE_SHIFT != pgpos) {
2251 unlock_page(new_page);
2252 page_cache_release(new_page);
2253 continue;
2254 }
2255 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2256 if (zerofrom & (blocksize-1)) {
2257 *bytes |= (blocksize-1);
2258 (*bytes)++;
2259 }
2260 status = __block_prepare_write(inode, new_page, zerofrom,
2261 PAGE_CACHE_SIZE, get_block);
2262 if (status)
2263 goto out_unmap;
2264 kaddr = kmap_atomic(new_page, KM_USER0);
2265 memset(kaddr+zerofrom, 0, PAGE_CACHE_SIZE-zerofrom);
2266 flush_dcache_page(new_page);
2267 kunmap_atomic(kaddr, KM_USER0);
2268 generic_commit_write(NULL, new_page, zerofrom, PAGE_CACHE_SIZE);
2269 unlock_page(new_page);
2270 page_cache_release(new_page);
2271 }
2273 if (page->index < pgpos) {
2274 /* completely inside the area */
2275 zerofrom = offset;
2276 } else {
2277 /* page covers the boundary, find the boundary offset */
2278 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2280 /* if we will expand the thing last block will be filled */
2281 if (to > zerofrom && (zerofrom & (blocksize-1))) {
2282 *bytes |= (blocksize-1);
2283 (*bytes)++;
2284 }
2286 /* starting below the boundary? Nothing to zero out */
2287 if (offset <= zerofrom)
2288 zerofrom = offset;
2289 }
2290 status = __block_prepare_write(inode, page, zerofrom, to, get_block);
2291 if (status)
2292 goto out1;
2293 if (zerofrom < offset) {
2294 kaddr = kmap_atomic(page, KM_USER0);
2295 memset(kaddr+zerofrom, 0, offset-zerofrom);
2296 flush_dcache_page(page);
2297 kunmap_atomic(kaddr, KM_USER0);
2298 __block_commit_write(inode, page, zerofrom, offset);
2299 }
2300 return 0;
2301 out1:
2302 ClearPageUptodate(page);
2303 return status;
2305 out_unmap:
2306 ClearPageUptodate(new_page);
2307 unlock_page(new_page);
2308 page_cache_release(new_page);
2309 out:
2310 return status;
2311 }
2313 int block_prepare_write(struct page *page, unsigned from, unsigned to,
2314 get_block_t *get_block)
2315 {
2316 struct inode *inode = page->mapping->host;
2317 int err = __block_prepare_write(inode, page, from, to, get_block);
2318 if (err)
2319 ClearPageUptodate(page);
2320 return err;
2321 }
2323 int block_commit_write(struct page *page, unsigned from, unsigned to)
2324 {
2325 struct inode *inode = page->mapping->host;
2326 __block_commit_write(inode,page,from,to);
2327 return 0;
2328 }
2330 int generic_commit_write(struct file *file, struct page *page,
2331 unsigned from, unsigned to)
2332 {
2333 struct inode *inode = page->mapping->host;
2334 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2335 __block_commit_write(inode,page,from,to);
2336 /*
2337 * No need to use i_size_read() here, the i_size
2338 * cannot change under us because we hold i_mutex.
2339 */
2340 if (pos > inode->i_size) {
2341 i_size_write(inode, pos);
2342 mark_inode_dirty(inode);
2343 }
2344 return 0;
2345 }
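/*
 * Illustrative sketch, not part of the original file: the usual pairing is
 * a filesystem-private ->prepare_write that feeds its get_block routine to
 * block_prepare_write(), with generic_commit_write() installed directly as
 * ->commit_write. "myfs" names are hypothetical.
 */
#if 0	/* example only */
static int myfs_prepare_write(struct file *file, struct page *page,
			unsigned from, unsigned to)
{
	return block_prepare_write(page, from, to, myfs_get_block);
}
#endif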
2348 /*
2349 * nobh_prepare_write()'s prereads are special: the buffer_heads are freed
2350 * immediately, while under the page lock. So it needs a special end_io
2351 * handler which does not touch the bh after unlocking it.
2352 *
2353 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
2354 * a race there is benign: unlock_buffer() only uses the bh's address for
2355 * hashing after unlocking the buffer, so it doesn't actually touch the bh
2356 * itself.
2357 */
2358 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2359 {
2360 if (uptodate) {
2361 set_buffer_uptodate(bh);
2362 } else {
2363 /* This happens, due to failed READA attempts. */
2364 clear_buffer_uptodate(bh);
2365 }
2366 unlock_buffer(bh);
2367 }
2369 /*
2370 * On entry, the page is fully not uptodate.
2371 * On exit the page is fully uptodate in the areas outside (from,to)
2372 */
2373 int nobh_prepare_write(struct page *page, unsigned from, unsigned to,
2374 get_block_t *get_block)
2375 {
2376 struct inode *inode = page->mapping->host;
2377 const unsigned blkbits = inode->i_blkbits;
2378 const unsigned blocksize = 1 << blkbits;
2379 struct buffer_head map_bh;
2380 struct buffer_head *read_bh[MAX_BUF_PER_PAGE];
2381 unsigned block_in_page;
2382 unsigned block_start;
2383 sector_t block_in_file;
2384 char *kaddr;
2385 int nr_reads = 0;
2386 int i;
2387 int ret = 0;
2388 int is_mapped_to_disk = 1;
2389 int dirtied_it = 0;
2391 if (PageMappedToDisk(page))
2392 return 0;
2394 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2395 map_bh.b_page = page;
2397 /*
2398 * We loop across all blocks in the page, whether or not they are
2399 * part of the affected region. This is so we can discover if the
2400 * page is fully mapped-to-disk.
2401 */
2402 for (block_start = 0, block_in_page = 0;
2403 block_start < PAGE_CACHE_SIZE;
2404 block_in_page++, block_start += blocksize) {
2405 unsigned block_end = block_start + blocksize;
2406 int create;
2408 map_bh.b_state = 0;
2409 create = 1;
2410 if (block_start >= to)
2411 create = 0;
2412 ret = get_block(inode, block_in_file + block_in_page,
2413 &map_bh, create);
2414 if (ret)
2415 goto failed;
2416 if (!buffer_mapped(&map_bh))
2417 is_mapped_to_disk = 0;
2418 if (buffer_new(&map_bh))
2419 unmap_underlying_metadata(map_bh.b_bdev,
2420 map_bh.b_blocknr);
2421 if (PageUptodate(page))
2422 continue;
2423 if (buffer_new(&map_bh) || !buffer_mapped(&map_bh)) {
2424 kaddr = kmap_atomic(page, KM_USER0);
2425 if (block_start < from) {
2426 memset(kaddr+block_start, 0, from-block_start);
2427 dirtied_it = 1;
2428 }
2429 if (block_end > to) {
2430 memset(kaddr + to, 0, block_end - to);
2431 dirtied_it = 1;
2432 }
2433 flush_dcache_page(page);
2434 kunmap_atomic(kaddr, KM_USER0);
2435 continue;
2436 }
2437 if (buffer_uptodate(&map_bh))
2438 continue; /* reiserfs does this */
2439 if (block_start < from || block_end > to) {
2440 struct buffer_head *bh = alloc_buffer_head(GFP_NOFS);
2442 if (!bh) {
2443 ret = -ENOMEM;
2444 goto failed;
2445 }
2446 bh->b_state = map_bh.b_state;
2447 atomic_set(&bh->b_count, 0);
2448 bh->b_this_page = NULL;
2449 bh->b_page = page;
2450 bh->b_blocknr = map_bh.b_blocknr;
2451 bh->b_size = blocksize;
2452 bh->b_data = (char *)(long)block_start;
2453 bh->b_bdev = map_bh.b_bdev;
2454 bh->b_private = NULL;
2455 read_bh[nr_reads++] = bh;
2456 }
2457 }
2459 if (nr_reads) {
2460 struct buffer_head *bh;
2462 /*
2463 * The page is locked, so these buffers are protected from
2464 * any VM or truncate activity. Hence we don't need to care
2465 * for the buffer_head refcounts.
2466 */
2467 for (i = 0; i < nr_reads; i++) {
2468 bh = read_bh[i];
2469 lock_buffer(bh);
2470 bh->b_end_io = end_buffer_read_nobh;
2471 submit_bh(READ, bh);
2472 }
2473 for (i = 0; i < nr_reads; i++) {
2474 bh = read_bh[i];
2475 wait_on_buffer(bh);
2476 if (!buffer_uptodate(bh))
2477 ret = -EIO;
2478 free_buffer_head(bh);
2479 read_bh[i] = NULL;
2480 }
2481 if (ret)
2482 goto failed;
2483 }
2485 if (is_mapped_to_disk)
2486 SetPageMappedToDisk(page);
2487 SetPageUptodate(page);
2489 /*
2490 * Setting the page dirty here isn't necessary for the prepare_write
2491 * function - commit_write will do that. But if/when this function is
2492 * used within the pagefault handler to ensure that all mmapped pages
2493 * have backing space in the filesystem, we will need to dirty the page
2494 * if its contents were altered.
2495 */
2496 if (dirtied_it)
2497 set_page_dirty(page);
2499 return 0;
2501 failed:
2502 for (i = 0; i < nr_reads; i++) {
2503 if (read_bh[i])
2504 free_buffer_head(read_bh[i]);
2505 }
2507 /*
2508 * Error recovery is pretty slack. Clear the page and mark it dirty
2509 * so we'll later zero out any blocks which _were_ allocated.
2510 */
2511 kaddr = kmap_atomic(page, KM_USER0);
2512 memset(kaddr, 0, PAGE_CACHE_SIZE);
2513 kunmap_atomic(kaddr, KM_USER0);
2514 SetPageUptodate(page);
2515 set_page_dirty(page);
2516 return ret;
2517 }
2518 EXPORT_SYMBOL(nobh_prepare_write);
2520 int nobh_commit_write(struct file *file, struct page *page,
2521 unsigned from, unsigned to)
2522 {
2523 struct inode *inode = page->mapping->host;
2524 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2526 set_page_dirty(page);
2527 if (pos > inode->i_size) {
2528 i_size_write(inode, pos);
2529 mark_inode_dirty(inode);
2530 }
2531 return 0;
2532 }
2533 EXPORT_SYMBOL(nobh_commit_write);
2535 /*
2536 * nobh_writepage() - based on block_write_full_page() except
2537 * that it tries to operate without attaching bufferheads to
2538 * the page.
2539 */
2540 int nobh_writepage(struct page *page, get_block_t *get_block,
2541 struct writeback_control *wbc)
2542 {
2543 struct inode * const inode = page->mapping->host;
2544 loff_t i_size = i_size_read(inode);
2545 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2546 unsigned offset;
2547 void *kaddr;
2548 int ret;
2550 /* Is the page fully inside i_size? */
2551 if (page->index < end_index)
2552 goto out;
2554 /* Is the page fully outside i_size? (truncate in progress) */
2555 offset = i_size & (PAGE_CACHE_SIZE-1);
2556 if (page->index >= end_index+1 || !offset) {
2557 /*
2558 * The page may have dirty, unmapped buffers. For example,
2559 * they may have been added in ext3_writepage(). Make them
2560 * freeable here, so the page does not leak.
2561 */
2562 #if 0
2563 /* Not really sure about this - do we need this ? */
2564 if (page->mapping->a_ops->invalidatepage)
2565 page->mapping->a_ops->invalidatepage(page, offset);
2566 #endif
2567 unlock_page(page);
2568 return 0; /* don't care */
2569 }
2571 /*
2572 * The page straddles i_size. It must be zeroed out on each and every
2573 * writepage invocation because it may be mmapped. "A file is mapped
2574 * in multiples of the page size. For a file that is not a multiple of
2575 * the page size, the remaining memory is zeroed when mapped, and
2576 * writes to that region are not written out to the file."
2577 */
2578 kaddr = kmap_atomic(page, KM_USER0);
2579 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2580 flush_dcache_page(page);
2581 kunmap_atomic(kaddr, KM_USER0);
2582 out:
2583 ret = mpage_writepage(page, get_block, wbc);
2584 if (ret == -EAGAIN)
2585 ret = __block_write_full_page(inode, page, get_block, wbc);
2586 return ret;
2587 }
2588 EXPORT_SYMBOL(nobh_writepage);
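/*
 * Illustrative sketch, not part of the original file: the nobh variants
 * are meant to be used together - nobh_prepare_write() as ->prepare_write,
 * nobh_commit_write() as ->commit_write and nobh_writepage() as ->writepage.
 * "myfs" names are hypothetical.
 */
#if 0	/* example only */
static int myfs_prepare_write(struct file *file, struct page *page,
			unsigned from, unsigned to)
{
	return nobh_prepare_write(page, from, to, myfs_get_block);
}

static int myfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return nobh_writepage(page, myfs_get_block, wbc);
}
#endif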
2590 /*
2591 * This function assumes that ->prepare_write() uses nobh_prepare_write().
2592 */
2593 int nobh_truncate_page(struct address_space *mapping, loff_t from)
2594 {
2595 struct inode *inode = mapping->host;
2596 unsigned blocksize = 1 << inode->i_blkbits;
2597 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2598 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2599 unsigned to;
2600 struct page *page;
2601 struct address_space_operations *a_ops = mapping->a_ops;
2602 char *kaddr;
2603 int ret = 0;
2605 if ((offset & (blocksize - 1)) == 0)
2606 goto out;
2608 ret = -ENOMEM;
2609 page = grab_cache_page(mapping, index);
2610 if (!page)
2611 goto out;
2613 to = (offset + blocksize) & ~(blocksize - 1);
2614 ret = a_ops->prepare_write(NULL, page, offset, to);
2615 if (ret == 0) {
2616 kaddr = kmap_atomic(page, KM_USER0);
2617 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2618 flush_dcache_page(page);
2619 kunmap_atomic(kaddr, KM_USER0);
2620 set_page_dirty(page);
2621 }
2622 unlock_page(page);
2623 page_cache_release(page);
2624 out:
2625 return ret;
2626 }
2627 EXPORT_SYMBOL(nobh_truncate_page);
2629 int block_truncate_page(struct address_space *mapping,
2630 loff_t from, get_block_t *get_block)
2631 {
2632 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2633 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2634 unsigned blocksize;
2635 sector_t iblock;
2636 unsigned length, pos;
2637 struct inode *inode = mapping->host;
2638 struct page *page;
2639 struct buffer_head *bh;
2640 void *kaddr;
2641 int err;
2643 blocksize = 1 << inode->i_blkbits;
2644 length = offset & (blocksize - 1);
2646 /* Block boundary? Nothing to do */
2647 if (!length)
2648 return 0;
2650 length = blocksize - length;
2651 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2653 page = grab_cache_page(mapping, index);
2654 err = -ENOMEM;
2655 if (!page)
2656 goto out;
2658 if (!page_has_buffers(page))
2659 create_empty_buffers(page, blocksize, 0);
2661 /* Find the buffer that contains "offset" */
2662 bh = page_buffers(page);
2663 pos = blocksize;
2664 while (offset >= pos) {
2665 bh = bh->b_this_page;
2666 iblock++;
2667 pos += blocksize;
2668 }
2670 err = 0;
2671 if (!buffer_mapped(bh)) {
2672 err = get_block(inode, iblock, bh, 0);
2673 if (err)
2674 goto unlock;
2675 /* unmapped? It's a hole - nothing to do */
2676 if (!buffer_mapped(bh))
2677 goto unlock;
2678 }
2680 /* Ok, it's mapped. Make sure it's up-to-date */
2681 if (PageUptodate(page))
2682 set_buffer_uptodate(bh);
2684 if (!buffer_uptodate(bh) && !buffer_delay(bh)) {
2685 err = -EIO;
2686 ll_rw_block(READ, 1, &bh);
2687 wait_on_buffer(bh);
2688 /* Uhhuh. Read error. Complain and punt. */
2689 if (!buffer_uptodate(bh))
2690 goto unlock;
2691 }
2693 kaddr = kmap_atomic(page, KM_USER0);
2694 memset(kaddr + offset, 0, length);
2695 flush_dcache_page(page);
2696 kunmap_atomic(kaddr, KM_USER0);
2698 mark_buffer_dirty(bh);
2699 err = 0;
2701 unlock:
2702 unlock_page(page);
2703 page_cache_release(page);
2704 out:
2705 return err;
2706 }
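/*
 * Illustrative sketch, not part of the original file: a filesystem calls
 * block_truncate_page() from its truncate path to zero the tail of the
 * last partial block. "myfs_get_block" is a hypothetical helper.
 */
#if 0	/* example only */
static int myfs_zero_tail(struct inode *inode)
{
	return block_truncate_page(inode->i_mapping,
				inode->i_size, myfs_get_block);
}
#endif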
2708 /*
2709 * The generic ->writepage function for buffer-backed address_spaces
2710 */
2711 int block_write_full_page(struct page *page, get_block_t *get_block,
2712 struct writeback_control *wbc)
2713 {
2714 struct inode * const inode = page->mapping->host;
2715 loff_t i_size = i_size_read(inode);
2716 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2717 unsigned offset;
2718 void *kaddr;
2720 /* Is the page fully inside i_size? */
2721 if (page->index < end_index)
2722 return __block_write_full_page(inode, page, get_block, wbc);
2724 /* Is the page fully outside i_size? (truncate in progress) */
2725 offset = i_size & (PAGE_CACHE_SIZE-1);
2726 if (page->index >= end_index+1 || !offset) {
2727 /*
2728 * The page may have dirty, unmapped buffers. For example,
2729 * they may have been added in ext3_writepage(). Make them
2730 * freeable here, so the page does not leak.
2731 */
2732 do_invalidatepage(page, 0);
2733 unlock_page(page);
2734 return 0; /* don't care */
2735 }
2737 /*
2738 * The page straddles i_size. It must be zeroed out on each and every
2739 * writepage invocation because it may be mmapped. "A file is mapped
2740 * in multiples of the page size. For a file that is not a multiple of
2741 * the page size, the remaining memory is zeroed when mapped, and
2742 * writes to that region are not written out to the file."
2743 */
2744 kaddr = kmap_atomic(page, KM_USER0);
2745 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2746 flush_dcache_page(page);
2747 kunmap_atomic(kaddr, KM_USER0);
2748 return __block_write_full_page(inode, page, get_block, wbc);
2749 }
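/*
 * Illustrative sketch, not part of the original file: wiring
 * block_write_full_page() up as a ->writepage method ("myfs" names are
 * hypothetical):
 */
#if 0	/* example only */
static int myfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, myfs_get_block, wbc);
}
#endif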
2751 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2752 get_block_t *get_block)
2753 {
2754 struct buffer_head tmp;
2755 struct inode *inode = mapping->host;
2756 tmp.b_state = 0;
2757 tmp.b_blocknr = 0;
2758 get_block(inode, block, &tmp, 0);
2759 return tmp.b_blocknr;
2760 }
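/*
 * Illustrative sketch, not part of the original file: this is how a
 * filesystem usually implements the ->bmap address_space operation, which
 * backs the FIBMAP ioctl. "myfs_get_block" is a hypothetical helper.
 */
#if 0	/* example only */
static sector_t myfs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, myfs_get_block);
}
#endif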
2762 static int end_bio_bh_io_sync(struct bio *bio, unsigned int bytes_done, int err)
2763 {
2764 struct buffer_head *bh = bio->bi_private;
2766 if (bio->bi_size)
2767 return 1;
2769 if (err == -EOPNOTSUPP) {
2770 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2771 set_bit(BH_Eopnotsupp, &bh->b_state);
2772 }
2774 bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2775 bio_put(bio);
2776 return 0;
2777 }
2779 int submit_bh(int rw, struct buffer_head * bh)
2780 {
2781 struct bio *bio;
2782 int ret = 0;
2784 BUG_ON(!buffer_locked(bh));
2785 BUG_ON(!buffer_mapped(bh));
2786 BUG_ON(!bh->b_end_io);
2788 if (buffer_ordered(bh) && (rw == WRITE))
2789 rw = WRITE_BARRIER;
2791 /*
2792 * Only clear out a write error when rewriting, should this
2793 * include WRITE_SYNC as well?
2794 */
2795 if (test_set_buffer_req(bh) && (rw == WRITE || rw == WRITE_BARRIER))
2796 clear_buffer_write_io_error(bh);
2798 /*
2799 * from here on down, it's all bio -- do the initial mapping,
2800 * submit_bio -> generic_make_request may further map this bio around
2801 */
2802 bio = bio_alloc(GFP_NOIO, 1);
2804 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2805 bio->bi_bdev = bh->b_bdev;
2806 bio->bi_io_vec[0].bv_page = bh->b_page;
2807 bio->bi_io_vec[0].bv_len = bh->b_size;
2808 bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2810 bio->bi_vcnt = 1;
2811 bio->bi_idx = 0;
2812 bio->bi_size = bh->b_size;
2814 bio->bi_end_io = end_bio_bh_io_sync;
2815 bio->bi_private = bh;
2817 bio_get(bio);
2818 submit_bio(rw, bio);
2820 if (bio_flagged(bio, BIO_EOPNOTSUPP))
2821 ret = -EOPNOTSUPP;
2823 bio_put(bio);
2824 return ret;
2825 }
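/*
 * Illustrative sketch, not part of the original file: a synchronous
 * single-buffer read built on submit_bh(). The caller must hold a
 * reference on a mapped buffer_head; end_buffer_read_sync() drops the
 * extra reference taken here and unlocks the buffer on completion.
 */
#if 0	/* example only */
static int myfs_read_bh_sync(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return 0;
	}
	get_bh(bh);
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(READ, bh);
	wait_on_buffer(bh);
	return buffer_uptodate(bh) ? 0 : -EIO;
}
#endif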
2827 /**
2828 * ll_rw_block: low-level access to block devices (DEPRECATED)
2829 * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
2830 * @nr: number of &struct buffer_heads in the array
2831 * @bhs: array of pointers to &struct buffer_head
2833 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2834 * requests an I/O operation on them, either a %READ or a %WRITE. The third
2835 * option, %SWRITE, is like %WRITE except that we make sure the *current* data
2836 * in the buffers is sent to disk. The fourth option, %READA, is described in
2837 * the documentation for generic_make_request(), which ll_rw_block() calls.
2839 * This function drops any buffer that it cannot get a lock on (with the
2840 * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
2841 * clean when doing a write request, and any buffer that appears to be
2842 * up-to-date when doing a read request. Further it marks as clean buffers that
2843 * are processed for writing (the buffer cache won't assume that they are
2844 * actually clean until the buffer gets unlocked).
2846 * ll_rw_block sets b_end_io to a simple completion handler that marks
2847 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
2848 * any waiters.
2850 * All of the buffers must be for the same device, and must also be a
2851 * multiple of the current approved size for the device.
2852 */
2853 void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2854 {
2855 int i;
2857 for (i = 0; i < nr; i++) {
2858 struct buffer_head *bh = bhs[i];
2860 if (rw == SWRITE)
2861 lock_buffer(bh);
2862 else if (test_set_buffer_locked(bh))
2863 continue;
2865 if (rw == WRITE || rw == SWRITE) {
2866 if (test_clear_buffer_dirty(bh)) {
2867 bh->b_end_io = end_buffer_write_sync;
2868 get_bh(bh);
2869 submit_bh(WRITE, bh);
2870 continue;
2871 }
2872 } else {
2873 if (!buffer_uptodate(bh)) {
2874 bh->b_end_io = end_buffer_read_sync;
2875 get_bh(bh);
2876 submit_bh(rw, bh);
2877 continue;
2878 }
2879 }
2880 unlock_buffer(bh);
2881 }
2882 }
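/*
 * Illustrative sketch, not part of the original file: the common batch
 * pattern around ll_rw_block() - start reads on several buffers, then
 * wait for and check each one. "myfs_read_buffers" is hypothetical.
 */
#if 0	/* example only */
static int myfs_read_buffers(struct buffer_head *bhs[], int nr)
{
	int i, err = 0;

	ll_rw_block(READ, nr, bhs);
	for (i = 0; i < nr; i++) {
		wait_on_buffer(bhs[i]);
		if (!buffer_uptodate(bhs[i]))
			err = -EIO;
	}
	return err;
}
#endif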
2884 /*
2885 * For a data-integrity writeout, we need to wait upon any in-progress I/O
2886 * and then start new I/O and then wait upon it. The caller must have a ref on
2887 * the buffer_head.
2888 */
2889 int sync_dirty_buffer(struct buffer_head *bh)
2890 {
2891 int ret = 0;
2893 WARN_ON(atomic_read(&bh->b_count) < 1);
2894 lock_buffer(bh);
2895 if (test_clear_buffer_dirty(bh)) {
2896 get_bh(bh);
2897 bh->b_end_io = end_buffer_write_sync;
2898 ret = submit_bh(WRITE, bh);
2899 wait_on_buffer(bh);
2900 if (buffer_eopnotsupp(bh)) {
2901 clear_buffer_eopnotsupp(bh);
2902 ret = -EOPNOTSUPP;
2903 }
2904 if (!ret && !buffer_uptodate(bh))
2905 ret = -EIO;
2906 } else {
2907 unlock_buffer(bh);
2908 }
2909 return ret;
2910 }
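/*
 * Illustrative sketch, not part of the original file: the typical
 * data-integrity sequence - dirty the buffer, then force it (and any I/O
 * already in flight) to disk. The caller must hold a reference on bh.
 */
#if 0	/* example only */
static int myfs_write_bh_sync(struct buffer_head *bh)
{
	mark_buffer_dirty(bh);
	return sync_dirty_buffer(bh);
}
#endif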
2912 /*
2913 * try_to_free_buffers() checks if all the buffers on this particular page
2914 * are unused, and releases them if so.
2916 * Exclusion against try_to_free_buffers may be obtained by either
2917 * locking the page or by holding its mapping's private_lock.
2919 * If the page is dirty but all the buffers are clean then we need to
2920 * be sure to mark the page clean as well. This is because the page
2921 * may be against a block device, and a later reattachment of buffers
2922 * to a dirty page will set *all* buffers dirty. Which would corrupt
2923 * filesystem data on the same device.
2925 * The same applies to regular filesystem pages: if all the buffers are
2926 * clean then we set the page clean and proceed. To do that, we require
2927 * total exclusion from __set_page_dirty_buffers(). That is obtained with
2928 * private_lock.
2930 * try_to_free_buffers() is non-blocking.
2931 */
2932 static inline int buffer_busy(struct buffer_head *bh)
2933 {
2934 return atomic_read(&bh->b_count) |
2935 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
2936 }
2938 static int
2939 drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
2940 {
2941 struct buffer_head *head = page_buffers(page);
2942 struct buffer_head *bh;
2944 bh = head;
2945 do {
2946 if (buffer_write_io_error(bh) && page->mapping)
2947 set_bit(AS_EIO, &page->mapping->flags);
2948 if (buffer_busy(bh))
2949 goto failed;
2950 bh = bh->b_this_page;
2951 } while (bh != head);
2953 do {
2954 struct buffer_head *next = bh->b_this_page;
2956 if (!list_empty(&bh->b_assoc_buffers))
2957 __remove_assoc_queue(bh);
2958 bh = next;
2959 } while (bh != head);
2960 *buffers_to_free = head;
2961 __clear_page_buffers(page);
2962 return 1;
2963 failed:
2964 return 0;
2965 }
2967 int try_to_free_buffers(struct page *page)
2968 {
2969 struct address_space * const mapping = page->mapping;
2970 struct buffer_head *buffers_to_free = NULL;
2971 int ret = 0;
2973 BUG_ON(!PageLocked(page));
2974 if (PageWriteback(page))
2975 return 0;
2977 if (mapping == NULL) { /* can this still happen? */
2978 ret = drop_buffers(page, &buffers_to_free);
2979 goto out;
2980 }
2982 spin_lock(&mapping->private_lock);
2983 ret = drop_buffers(page, &buffers_to_free);
2984 if (ret) {
2985 /*
2986 * If the filesystem writes its buffers by hand (eg ext3)
2987 * then we can have clean buffers against a dirty page. We
2988 * clean the page here; otherwise later reattachment of buffers
2989 * could encounter a non-uptodate page, which is unresolvable.
2990 * This only applies in the rare case where try_to_free_buffers
2991 * succeeds but the page is not freed.
2992 */
2993 clear_page_dirty(page);
2994 }
2995 spin_unlock(&mapping->private_lock);
2996 out:
2997 if (buffers_to_free) {
2998 struct buffer_head *bh = buffers_to_free;
3000 do {
3001 struct buffer_head *next = bh->b_this_page;
3002 free_buffer_head(bh);
3003 bh = next;
3004 } while (bh != buffers_to_free);
3005 }
3006 return ret;
3007 }
3008 EXPORT_SYMBOL(try_to_free_buffers);
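/*
 * Illustrative sketch, not part of the original file: filesystems commonly
 * point their ->releasepage at try_to_free_buffers(), either directly or
 * via a thin wrapper. "myfs_releasepage" is hypothetical.
 */
#if 0	/* example only */
static int myfs_releasepage(struct page *page, gfp_t gfp_mask)
{
	return try_to_free_buffers(page);
}
#endif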
3010 int block_sync_page(struct page *page)
3011 {
3012 struct address_space *mapping;
3014 smp_mb();
3015 mapping = page_mapping(page);
3016 if (mapping)
3017 blk_run_backing_dev(mapping->backing_dev_info, page);
3018 return 0;
3019 }
3021 /*
3022 * There are no bdflush tunables left. But distributions are
3023 * still running obsolete flush daemons, so we terminate them here.
3024 *
3025 * Use of bdflush() is deprecated and will be removed in a future kernel.
3026 * The `pdflush' kernel threads fully replace bdflush daemons and this call.
3027 */
3028 asmlinkage long sys_bdflush(int func, long data)
3029 {
3030 static int msg_count;
3032 if (!capable(CAP_SYS_ADMIN))
3033 return -EPERM;
3035 if (msg_count < 5) {
3036 msg_count++;
3037 printk(KERN_INFO
3038 "warning: process `%s' used the obsolete bdflush"
3039 " system call\n", current->comm);
3040 printk(KERN_INFO "Fix your initscripts?\n");
3041 }
3043 if (func == 1)
3044 do_exit(0);
3045 return 0;
3046 }
3048 /*
3049 * Buffer-head allocation
3050 */
3051 static kmem_cache_t *bh_cachep;
3053 /*
3054 * Once the number of bh's in the machine exceeds this level, we start
3055 * stripping them in writeback.
3056 */
3057 static int max_buffer_heads;
3059 int buffer_heads_over_limit;
3061 struct bh_accounting {
3062 int nr; /* Number of live bh's */
3063 int ratelimit; /* Limit cacheline bouncing */
3064 };
3066 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3068 static void recalc_bh_state(void)
3069 {
3070 int i;
3071 int tot = 0;
3073 if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
3074 return;
3075 __get_cpu_var(bh_accounting).ratelimit = 0;
3076 for_each_online_cpu(i)
3077 tot += per_cpu(bh_accounting, i).nr;
3078 buffer_heads_over_limit = (tot > max_buffer_heads);
3079 }
3081 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
3082 {
3083 struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
3084 if (ret) {
3085 get_cpu_var(bh_accounting).nr++;
3086 recalc_bh_state();
3087 put_cpu_var(bh_accounting);
3088 }
3089 return ret;
3090 }
3091 EXPORT_SYMBOL(alloc_buffer_head);
3093 void free_buffer_head(struct buffer_head *bh)
3094 {
3095 BUG_ON(!list_empty(&bh->b_assoc_buffers));
3096 kmem_cache_free(bh_cachep, bh);
3097 get_cpu_var(bh_accounting).nr--;
3098 recalc_bh_state();
3099 put_cpu_var(bh_accounting);
3100 }
3101 EXPORT_SYMBOL(free_buffer_head);
3103 static void
3104 init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long flags)
3105 {
3106 if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
3107 SLAB_CTOR_CONSTRUCTOR) {
3108 struct buffer_head * bh = (struct buffer_head *)data;
3110 memset(bh, 0, sizeof(*bh));
3111 INIT_LIST_HEAD(&bh->b_assoc_buffers);
3112 }
3113 }
3115 #ifdef CONFIG_HOTPLUG_CPU
3116 static void buffer_exit_cpu(int cpu)
3117 {
3118 int i;
3119 struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3121 for (i = 0; i < BH_LRU_SIZE; i++) {
3122 brelse(b->bhs[i]);
3123 b->bhs[i] = NULL;
3124 }
3125 get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
3126 per_cpu(bh_accounting, cpu).nr = 0;
3127 put_cpu_var(bh_accounting);
3128 }
3130 static int buffer_cpu_notify(struct notifier_block *self,
3131 unsigned long action, void *hcpu)
3132 {
3133 if (action == CPU_DEAD)
3134 buffer_exit_cpu((unsigned long)hcpu);
3135 return NOTIFY_OK;
3136 }
3137 #endif /* CONFIG_HOTPLUG_CPU */
3139 void __init buffer_init(void)
3140 {
3141 int nrpages;
3143 bh_cachep = kmem_cache_create("buffer_head",
3144 sizeof(struct buffer_head), 0,
3145 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3146 SLAB_MEM_SPREAD),
3147 init_buffer_head,
3148 NULL);
3150 /*
3151 * Limit the bh occupancy to 10% of ZONE_NORMAL
3152 */
3153 nrpages = (nr_free_buffer_pages() * 10) / 100;
3154 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3155 hotcpu_notifier(buffer_cpu_notify, 0);
3156 }
3158 EXPORT_SYMBOL(__bforget);
3159 EXPORT_SYMBOL(__brelse);
3160 EXPORT_SYMBOL(__wait_on_buffer);
3161 EXPORT_SYMBOL(block_commit_write);
3162 EXPORT_SYMBOL(block_prepare_write);
3163 EXPORT_SYMBOL(block_read_full_page);
3164 EXPORT_SYMBOL(block_sync_page);
3165 EXPORT_SYMBOL(block_truncate_page);
3166 EXPORT_SYMBOL(block_write_full_page);
3167 EXPORT_SYMBOL(cont_prepare_write);
3168 EXPORT_SYMBOL(end_buffer_async_write);
3169 EXPORT_SYMBOL(end_buffer_read_sync);
3170 EXPORT_SYMBOL(end_buffer_write_sync);
3171 EXPORT_SYMBOL(file_fsync);
3172 EXPORT_SYMBOL(fsync_bdev);
3173 EXPORT_SYMBOL(generic_block_bmap);
3174 EXPORT_SYMBOL(generic_commit_write);
3175 EXPORT_SYMBOL(generic_cont_expand);
3176 EXPORT_SYMBOL(generic_cont_expand_simple);
3177 EXPORT_SYMBOL(init_buffer);
3178 EXPORT_SYMBOL(invalidate_bdev);
3179 EXPORT_SYMBOL(ll_rw_block);
3180 EXPORT_SYMBOL(mark_buffer_dirty);
3181 EXPORT_SYMBOL(submit_bh);
3182 EXPORT_SYMBOL(sync_dirty_buffer);
3183 EXPORT_SYMBOL(unlock_buffer);