1 /*
2 * linux/fs/buffer.c
4 * Copyright (C) 1991, 1992, 2002 Linus Torvalds
5 */
7 /*
8 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
10 * Removed a lot of unnecessary code and simplified things now that
11 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
13 * Speed up hash, lru, and free list operations. Use gfp() for allocating
14 * hash table, use SLAB cache for buffer heads. SMP threading. -DaveM
16 * Added 32k buffer block sizes - these are required on older ARM systems. - RMK
18 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
21 #include <linux/config.h>
22 #include <linux/kernel.h>
23 #include <linux/fs.h>
24 #include <linux/mm.h>
25 #include <linux/percpu.h>
26 #include <linux/slab.h>
27 #include <linux/smp_lock.h>
28 #include <linux/blkdev.h>
29 #include <linux/file.h>
30 #include <linux/quotaops.h>
31 #include <linux/highmem.h>
32 #include <linux/module.h>
33 #include <linux/writeback.h>
34 #include <linux/hash.h>
35 #include <linux/suspend.h>
36 #include <linux/buffer_head.h>
37 #include <linux/bio.h>
38 #include <linux/notifier.h>
39 #include <linux/cpu.h>
40 #include <asm/bitops.h>
42 static void invalidate_bh_lrus(void);
44 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
47 * Hashed waitqueue_heads for wait_on_buffer()
49 #define BH_WAIT_TABLE_ORDER 7
50 static struct bh_wait_queue_head {
51 wait_queue_head_t wqh;
52 } ____cacheline_aligned_in_smp bh_wait_queue_heads[1<<BH_WAIT_TABLE_ORDER];
55 * Debug/devel support stuff
58 void __buffer_error(char *file, int line)
60 static int enough;
62 if (enough > 10)
63 return;
64 enough++;
65 printk("buffer layer error at %s:%d\n", file, line);
66 #ifndef CONFIG_KALLSYMS
67 printk("Pass this trace through ksymoops for reporting\n");
68 #endif
69 dump_stack();
71 EXPORT_SYMBOL(__buffer_error);
73 inline void
74 init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
76 bh->b_end_io = handler;
77 bh->b_private = private;
81 * Return the address of the waitqueue_head to be used for this
82 * buffer_head
84 wait_queue_head_t *bh_waitq_head(struct buffer_head *bh)
86 return &bh_wait_queue_heads[hash_ptr(bh, BH_WAIT_TABLE_ORDER)].wqh;
88 EXPORT_SYMBOL(bh_waitq_head);
90 void wake_up_buffer(struct buffer_head *bh)
92 wait_queue_head_t *wq = bh_waitq_head(bh);
94 if (waitqueue_active(wq))
95 wake_up_all(wq);
97 EXPORT_SYMBOL(wake_up_buffer);
99 void unlock_buffer(struct buffer_head *bh)
102 * unlock_buffer against a zero-count bh is a bug, if the page
103 * is not locked. Because then nothing protects the buffer's
104 * waitqueue, which is used here. (Well. Other locked buffers
105 * against the page will pin it. But complain anyway).
107 if (atomic_read(&bh->b_count) == 0 &&
108 !PageLocked(bh->b_page) &&
109 !PageWriteback(bh->b_page))
110 buffer_error();
112 clear_buffer_locked(bh);
113 smp_mb__after_clear_bit();
114 wake_up_buffer(bh);
118 * Block until a buffer comes unlocked. This doesn't stop it
119 * from becoming locked again - you have to lock it yourself
120 * if you want to preserve its state.
122 void __wait_on_buffer(struct buffer_head * bh)
124 wait_queue_head_t *wqh = bh_waitq_head(bh);
125 DEFINE_WAIT(wait);
127 if (atomic_read(&bh->b_count) == 0 &&
128 (!bh->b_page || !PageLocked(bh->b_page)))
129 buffer_error();
131 do {
132 prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
133 if (buffer_locked(bh)) {
134 blk_run_queues();
135 io_schedule();
137 } while (buffer_locked(bh));
138 finish_wait(wqh, &wait);
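/*
 * Illustrative sketch (not part of the original file): the caller-side
 * pattern these wait primitives support.  A synchronous read of a single
 * buffer locks it, submits the I/O and then sleeps in wait_on_buffer(),
 * which funnels into __wait_on_buffer() above.  example_read_bh_sync() is
 * a hypothetical name; __bread_slow() later in this file does essentially
 * the same thing.
 */
static int example_read_bh_sync(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return 0;		/* already valid in memory */
	}
	get_bh(bh);			/* reference dropped by end_buffer_io_sync() */
	bh->b_end_io = end_buffer_io_sync;
	submit_bh(READ, bh);
	wait_on_buffer(bh);		/* sleeps on the hashed waitqueue above */
	return buffer_uptodate(bh) ? 0 : -EIO;
}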
141 static void
142 __set_page_buffers(struct page *page, struct buffer_head *head)
144 if (page_has_buffers(page))
145 buffer_error();
146 page_cache_get(page);
147 SetPagePrivate(page);
148 page->private = (unsigned long)head;
151 static void
152 __clear_page_buffers(struct page *page)
154 ClearPagePrivate(page);
155 page->private = 0;
156 page_cache_release(page);
159 static void buffer_io_error(struct buffer_head *bh)
161 char b[BDEVNAME_SIZE];
163 printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
164 bdevname(bh->b_bdev, b),
165 (unsigned long long)bh->b_blocknr);
169 * Default synchronous end-of-IO handler.. Just mark it up-to-date and
170 * unlock the buffer. This is what ll_rw_block uses too.
172 void end_buffer_io_sync(struct buffer_head *bh, int uptodate)
174 if (uptodate) {
175 set_buffer_uptodate(bh);
176 } else {
178 * This happens due to failed READA attempts.
179 * buffer_io_error(bh);
181 clear_buffer_uptodate(bh);
183 unlock_buffer(bh);
184 put_bh(bh);
188 * Write out and wait upon all the dirty data associated with a block
189 * device via its mapping. Does not take the superblock lock.
191 int sync_blockdev(struct block_device *bdev)
193 int ret = 0;
195 if (bdev) {
196 int err;
198 ret = filemap_fdatawrite(bdev->bd_inode->i_mapping);
199 err = filemap_fdatawait(bdev->bd_inode->i_mapping);
200 if (!ret)
201 ret = err;
203 return ret;
205 EXPORT_SYMBOL(sync_blockdev);
208 * Write out and wait upon all dirty data associated with this
209 * superblock. Filesystem data as well as the underlying block
210 * device. Takes the superblock lock.
212 int fsync_super(struct super_block *sb)
214 sync_inodes_sb(sb, 0);
215 DQUOT_SYNC(sb);
216 lock_super(sb);
217 if (sb->s_dirt && sb->s_op->write_super)
218 sb->s_op->write_super(sb);
219 unlock_super(sb);
220 if (sb->s_op->sync_fs)
221 sb->s_op->sync_fs(sb, 1);
222 sync_blockdev(sb->s_bdev);
223 sync_inodes_sb(sb, 1);
225 return sync_blockdev(sb->s_bdev);
229 * Write out and wait upon all dirty data associated with this
230 * device. Filesystem data as well as the underlying block
231 * device. Takes the superblock lock.
233 int fsync_bdev(struct block_device *bdev)
235 struct super_block *sb = get_super(bdev);
236 if (sb) {
237 int res = fsync_super(sb);
238 drop_super(sb);
239 return res;
241 return sync_blockdev(bdev);
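/*
 * Illustrative sketch (not part of the original file): how the helpers above
 * are typically combined around a media change.  All dirty filesystem and
 * blockdev data is written back and waited upon, then the now-stale cached
 * buffers are dropped before the new medium is touched.  example_flush_bdev()
 * is a hypothetical name.
 */
static int example_flush_bdev(struct block_device *bdev)
{
	int err;

	err = fsync_bdev(bdev);		/* write and wait on everything */
	invalidate_bdev(bdev, 0);	/* drop clean cached buffers */
	return err;
}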
245 * sync everything. Start out by waking pdflush, because that writes back
246 * all queues in parallel.
248 static void do_sync(unsigned long wait)
250 wakeup_bdflush(0);
251 sync_inodes(0); /* All mappings, inodes and their blockdevs */
252 DQUOT_SYNC(NULL);
253 sync_supers(); /* Write the superblocks */
254 sync_filesystems(0); /* Start syncing the filesystems */
255 sync_filesystems(wait); /* Waitingly sync the filesystems */
256 sync_inodes(wait); /* Mappings, inodes and blockdevs, again. */
257 if (!wait)
258 printk("Emergency Sync complete\n");
261 asmlinkage long sys_sync(void)
263 do_sync(1);
264 return 0;
267 void emergency_sync(void)
269 pdflush_operation(do_sync, 0);
273 * Generic function to fsync a file.
275 * filp may be NULL if called via the msync of a vma.
278 int file_fsync(struct file *filp, struct dentry *dentry, int datasync)
280 struct inode * inode = dentry->d_inode;
281 struct super_block * sb;
282 int ret;
284 /* sync the inode to buffers */
285 write_inode_now(inode, 0);
287 /* sync the superblock to buffers */
288 sb = inode->i_sb;
289 lock_super(sb);
290 if (sb->s_op->write_super)
291 sb->s_op->write_super(sb);
292 unlock_super(sb);
294 /* .. finally sync the buffers to disk */
295 ret = sync_blockdev(sb->s_bdev);
296 return ret;
299 asmlinkage long sys_fsync(unsigned int fd)
301 struct file * file;
302 struct dentry * dentry;
303 struct inode * inode;
304 int ret, err;
306 ret = -EBADF;
307 file = fget(fd);
308 if (!file)
309 goto out;
311 dentry = file->f_dentry;
312 inode = dentry->d_inode;
314 ret = -EINVAL;
315 if (!file->f_op || !file->f_op->fsync) {
316 /* Why? We can still call filemap_fdatawrite */
317 goto out_putf;
320 /* We need to protect against concurrent writers.. */
321 down(&inode->i_sem);
322 ret = filemap_fdatawrite(inode->i_mapping);
323 err = file->f_op->fsync(file, dentry, 0);
324 if (!ret)
325 ret = err;
326 err = filemap_fdatawait(inode->i_mapping);
327 if (!ret)
328 ret = err;
329 up(&inode->i_sem);
331 out_putf:
332 fput(file);
333 out:
334 return ret;
337 asmlinkage long sys_fdatasync(unsigned int fd)
339 struct file * file;
340 struct dentry * dentry;
341 struct inode * inode;
342 int ret, err;
344 ret = -EBADF;
345 file = fget(fd);
346 if (!file)
347 goto out;
349 dentry = file->f_dentry;
350 inode = dentry->d_inode;
352 ret = -EINVAL;
353 if (!file->f_op || !file->f_op->fsync)
354 goto out_putf;
356 down(&inode->i_sem);
357 ret = filemap_fdatawrite(inode->i_mapping);
358 err = file->f_op->fsync(file, dentry, 1);
359 if (!ret)
360 ret = err;
361 err = filemap_fdatawait(inode->i_mapping);
362 if (!ret)
363 ret = err;
364 up(&inode->i_sem);
366 out_putf:
367 fput(file);
368 out:
369 return ret;
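/*
 * Illustrative sketch (not part of the original file): a simple buffer-backed
 * filesystem can satisfy sys_fsync()/sys_fdatasync() above by pointing its
 * ->fsync method at the generic file_fsync() helper defined earlier, which
 * syncs the inode, the superblock and the underlying blockdev.  The
 * example_* name is hypothetical; the generic_file_* helpers are the usual
 * VFS defaults.
 */
static struct file_operations example_file_ops = {
	.read	= generic_file_read,
	.write	= generic_file_write,
	.mmap	= generic_file_mmap,
	.fsync	= file_fsync,		/* defined above */
};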
373 * Various filesystems appear to want __find_get_block to be non-blocking.
374 * But it's the page lock which protects the buffers. To get around this,
375 * we get exclusion from try_to_free_buffers with the blockdev mapping's
376 * private_lock.
378 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
379 * may be quite high. This code could TryLock the page, and if that
380 * succeeds, there is no need to take private_lock. (But if
381 * private_lock is contended then so is mapping->page_lock).
383 static struct buffer_head *
384 __find_get_block_slow(struct block_device *bdev, sector_t block, int unused)
386 struct inode *bd_inode = bdev->bd_inode;
387 struct address_space *bd_mapping = bd_inode->i_mapping;
388 struct buffer_head *ret = NULL;
389 unsigned long index;
390 struct buffer_head *bh;
391 struct buffer_head *head;
392 struct page *page;
394 index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
395 page = find_get_page(bd_mapping, index);
396 if (!page)
397 goto out;
399 spin_lock(&bd_mapping->private_lock);
400 if (!page_has_buffers(page))
401 goto out_unlock;
402 head = page_buffers(page);
403 bh = head;
404 do {
405 if (bh->b_blocknr == block) {
406 ret = bh;
407 get_bh(bh);
408 goto out_unlock;
410 bh = bh->b_this_page;
411 } while (bh != head);
412 buffer_error();
413 out_unlock:
414 spin_unlock(&bd_mapping->private_lock);
415 page_cache_release(page);
416 out:
417 return ret;
420 /* If invalidate_buffers() trashes dirty buffers, it means some kind
421 of fs corruption is going on. Trashing dirty data always implies losing
422 information that the user expected to have been safely stored on the
423 physical layer.
425 Thus invalidate_buffers in general usage is not allowed to trash
426 dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
427 be preserved. Such buffers are simply skipped.
429 We also skip buffers which are still in use. For example this can
430 happen if a userspace program is reading the block device.
432 NOTE: if the user removed a removable-media disk while there was still
433 dirty data not synced to disk (due to a bug in the device driver
434 or due to a user error), then by not destroying the dirty buffers we
435 could also corrupt the next medium inserted. A parameter is therefore
436 necessary to handle this case as safely as possible (trying not to
437 corrupt the newly inserted disk with data belonging to the old,
438 now-corrupted one). For the ramdisk, on the other hand, the natural way
439 to release the ramdisk memory is to destroy its dirty buffers.
441 These are two special cases. Normal usage implies that the device driver
442 issues a sync on the device (without waiting for I/O completion) and
443 then an invalidate_buffers call that doesn't trash dirty buffers.
445 For handling cache coherency with the blkdev pagecache, the 'update' case
446 has been introduced. It is needed to re-read from disk any pinned
447 buffer. NOTE: re-reading from disk is destructive, so we can do it only
448 when we assume nobody is changing the buffercache under our I/O and when
449 we think the disk contains more recent information than the buffercache.
450 The update == 1 pass marks the buffers we need to update; the update == 2
451 pass does the actual I/O. */
452 void invalidate_bdev(struct block_device *bdev, int destroy_dirty_buffers)
454 invalidate_bh_lrus();
456 * FIXME: what about destroy_dirty_buffers?
457 * We really want to use invalidate_inode_pages2() for
458 * that, but not until that's cleaned up.
460 invalidate_inode_pages(bdev->bd_inode->i_mapping);
464 * Kick pdflush then try to free up some ZONE_NORMAL memory.
466 static void free_more_memory(void)
468 struct zone *zone;
469 pg_data_t *pgdat;
471 wakeup_bdflush(1024);
472 blk_run_queues();
473 yield();
475 for_each_pgdat(pgdat) {
476 zone = pgdat->node_zonelists[GFP_NOFS&GFP_ZONEMASK].zones[0];
477 if (zone)
478 try_to_free_pages(zone, GFP_NOFS, 0);
483 * I/O completion handler for block_read_full_page() - pages
484 * which come unlocked at the end of I/O.
486 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
488 static spinlock_t page_uptodate_lock = SPIN_LOCK_UNLOCKED;
489 unsigned long flags;
490 struct buffer_head *tmp;
491 struct page *page;
492 int page_uptodate = 1;
494 BUG_ON(!buffer_async_read(bh));
496 page = bh->b_page;
497 if (uptodate) {
498 set_buffer_uptodate(bh);
499 } else {
500 clear_buffer_uptodate(bh);
501 buffer_io_error(bh);
502 SetPageError(page);
506 * Be _very_ careful from here on. Bad things can happen if
507 * two buffer heads end IO at almost the same time and both
508 * decide that the page is now completely done.
510 spin_lock_irqsave(&page_uptodate_lock, flags);
511 clear_buffer_async_read(bh);
512 unlock_buffer(bh);
513 tmp = bh;
514 do {
515 if (!buffer_uptodate(tmp))
516 page_uptodate = 0;
517 if (buffer_async_read(tmp)) {
518 BUG_ON(!buffer_locked(tmp));
519 goto still_busy;
521 tmp = tmp->b_this_page;
522 } while (tmp != bh);
523 spin_unlock_irqrestore(&page_uptodate_lock, flags);
526 * If none of the buffers had errors and they are all
527 * uptodate then we can set the page uptodate.
529 if (page_uptodate && !PageError(page))
530 SetPageUptodate(page);
531 unlock_page(page);
532 return;
534 still_busy:
535 spin_unlock_irqrestore(&page_uptodate_lock, flags);
536 return;
540 * Completion handler for block_write_full_page() - pages which are unlocked
541 * during I/O, and which have PageWriteback cleared upon I/O completion.
543 void end_buffer_async_write(struct buffer_head *bh, int uptodate)
545 static spinlock_t page_uptodate_lock = SPIN_LOCK_UNLOCKED;
546 unsigned long flags;
547 struct buffer_head *tmp;
548 struct page *page;
550 BUG_ON(!buffer_async_write(bh));
552 page = bh->b_page;
553 if (uptodate) {
554 set_buffer_uptodate(bh);
555 } else {
556 buffer_io_error(bh);
557 clear_buffer_uptodate(bh);
558 SetPageError(page);
561 spin_lock_irqsave(&page_uptodate_lock, flags);
562 clear_buffer_async_write(bh);
563 unlock_buffer(bh);
564 tmp = bh->b_this_page;
565 while (tmp != bh) {
566 if (buffer_async_write(tmp)) {
567 BUG_ON(!buffer_locked(tmp));
568 goto still_busy;
570 tmp = tmp->b_this_page;
572 spin_unlock_irqrestore(&page_uptodate_lock, flags);
573 end_page_writeback(page);
574 return;
576 still_busy:
577 spin_unlock_irqrestore(&page_uptodate_lock, flags);
578 return;
582 * If a page's buffers are under async read-in (end_buffer_async_read
583 * completion) then there is a possibility that another thread of
584 * control could lock one of the buffers after it has completed
585 * but while some of the other buffers have not completed. This
586 * locked buffer would confuse end_buffer_async_read() into not unlocking
587 * the page. So the absence of BH_Async_Read tells end_buffer_async_read()
588 * that this buffer is not under async I/O.
590 * The page comes unlocked when it has no locked buffer_async buffers
591 * left.
593 * PageLocked prevents anyone from starting new async read I/O against
594 * any of the buffers.
596 * PageWriteback is used to prevent simultaneous writeout of the same
597 * page.
599 * PageLocked prevents anyone from starting writeback of a page which is
600 * under read I/O (PageWriteback is only ever set against a locked page).
602 void mark_buffer_async_read(struct buffer_head *bh)
604 bh->b_end_io = end_buffer_async_read;
605 set_buffer_async_read(bh);
607 EXPORT_SYMBOL(mark_buffer_async_read);
609 void mark_buffer_async_write(struct buffer_head *bh)
611 bh->b_end_io = end_buffer_async_write;
612 set_buffer_async_write(bh);
614 EXPORT_SYMBOL(mark_buffer_async_write);
618 * fs/buffer.c contains helper functions for buffer-backed address space's
619 * fsync functions. A common requirement for buffer-based filesystems is
620 * that certain data from the backing blockdev needs to be written out for
621 * a successful fsync(). For example, ext2 indirect blocks need to be
622 * written back and waited upon before fsync() returns.
624 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
625 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
626 * management of a list of dependent buffers at ->i_mapping->private_list.
628 * Locking is a little subtle: try_to_free_buffers() will remove buffers
629 * from their controlling inode's queue when they are being freed. But
630 * try_to_free_buffers() will be operating against the *blockdev* mapping
631 * at the time, not against the S_ISREG file which depends on those buffers.
632 * So the locking for private_list is via the private_lock in the address_space
633 * which backs the buffers. Which is different from the address_space
634 * against which the buffers are listed. So for a particular address_space,
635 * mapping->private_lock does *not* protect mapping->private_list! In fact,
636 * mapping->private_list will always be protected by the backing blockdev's
637 * ->private_lock.
639 * Which introduces a requirement: all buffers on an address_space's
640 * ->private_list must be from the same address_space: the blockdev's.
642 * address_spaces which do not place buffers at ->private_list via these
643 * utility functions are free to use private_lock and private_list for
644 * whatever they want. The only requirement is that list_empty(private_list)
645 * be true at clear_inode() time.
647 * FIXME: clear_inode should not call invalidate_inode_buffers(). The
648 * filesystems should do that. invalidate_inode_buffers() should just go
649 * BUG_ON(!list_empty).
651 * FIXME: mark_buffer_dirty_inode() is a data-plane operation. It should
652 * take an address_space, not an inode. And it should be called
653 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
654 * queued up.
656 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
657 * list if it is already on a list. Because if the buffer is on a list,
658 * it *must* already be on the right one. If not, the filesystem is being
659 * silly. This will save a ton of locking. But first we have to ensure
660 * that buffers are taken *off* the old inode's list when they are freed
661 * (presumably in truncate). That requires careful auditing of all
662 * filesystems (do it inside bforget()). It could also be done by bringing
663 * b_inode back.
666 void buffer_insert_list(spinlock_t *lock,
667 struct buffer_head *bh, struct list_head *list)
669 spin_lock(lock);
670 list_move_tail(&bh->b_assoc_buffers, list);
671 spin_unlock(lock);
675 * The buffer's backing address_space's private_lock must be held
677 static inline void __remove_assoc_queue(struct buffer_head *bh)
679 list_del_init(&bh->b_assoc_buffers);
682 int inode_has_buffers(struct inode *inode)
684 return !list_empty(&inode->i_data.private_list);
688 * osync is designed to support O_SYNC io. It waits synchronously for
689 * all already-submitted IO to complete, but does not queue any new
690 * writes to the disk.
692 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
693 * you dirty the buffers, and then use osync_inode_buffers to wait for
694 * completion. Any other dirty buffers which are not yet queued for
695 * write will not be flushed to disk by the osync.
697 static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
699 struct buffer_head *bh;
700 struct list_head *p;
701 int err = 0;
703 spin_lock(lock);
704 repeat:
705 list_for_each_prev(p, list) {
706 bh = BH_ENTRY(p);
707 if (buffer_locked(bh)) {
708 get_bh(bh);
709 spin_unlock(lock);
710 wait_on_buffer(bh);
711 if (!buffer_uptodate(bh))
712 err = -EIO;
713 brelse(bh);
714 spin_lock(lock);
715 goto repeat;
718 spin_unlock(lock);
719 return err;
723 * sync_mapping_buffers - write out and wait upon a mapping's "associated"
724 * buffers
725 * @buffer_mapping - the mapping which backs the buffers' data
726 * @mapping - the mapping which wants those buffers written
728 * Starts I/O against the buffers at mapping->private_list, and waits upon
729 * that I/O.
731 * Basically, this is a convenience function for fsync(). @buffer_mapping is
732 * the blockdev which "owns" the buffers and @mapping is a file or directory
733 * which needs those buffers to be written for a successful fsync().
735 int sync_mapping_buffers(struct address_space *mapping)
737 struct address_space *buffer_mapping = mapping->assoc_mapping;
739 if (buffer_mapping == NULL || list_empty(&mapping->private_list))
740 return 0;
742 return fsync_buffers_list(&buffer_mapping->private_lock,
743 &mapping->private_list);
745 EXPORT_SYMBOL(sync_mapping_buffers);
748 * Called when we've recently written block `bblock', and it is known that
749 * `bblock' was for a buffer_boundary() buffer. This means that the block at
750 * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's
751 * dirty, schedule it for IO. So that indirects merge nicely with their data.
753 void write_boundary_block(struct block_device *bdev,
754 sector_t bblock, unsigned blocksize)
756 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
757 if (bh) {
758 if (buffer_dirty(bh))
759 ll_rw_block(WRITE, 1, &bh);
760 put_bh(bh);
764 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
766 struct address_space *mapping = inode->i_mapping;
767 struct address_space *buffer_mapping = bh->b_page->mapping;
769 mark_buffer_dirty(bh);
770 if (!mapping->assoc_mapping) {
771 mapping->assoc_mapping = buffer_mapping;
772 } else {
773 if (mapping->assoc_mapping != buffer_mapping)
774 BUG();
776 if (list_empty(&bh->b_assoc_buffers))
777 buffer_insert_list(&buffer_mapping->private_lock,
778 bh, &mapping->private_list);
780 EXPORT_SYMBOL(mark_buffer_dirty_inode);
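/*
 * Illustrative sketch (not part of the original file): the ext2-style usage
 * described in the comment block above.  When a filesystem dirties an
 * indirect (metadata) block that lives on the blockdev on behalf of a
 * regular file, it attaches the buffer to that file's mapping with
 * mark_buffer_dirty_inode(); the file's ->fsync can then flush exactly those
 * buffers with sync_mapping_buffers().  The example_* names are hypothetical.
 */
static void example_dirty_indirect(struct inode *inode, struct buffer_head *bh)
{
	/* bh is a blockdev buffer holding one of this file's indirect blocks */
	mark_buffer_dirty_inode(bh, inode);
}

static int example_fsync_metadata(struct inode *inode)
{
	/* writes and waits upon inode->i_mapping->private_list */
	return sync_mapping_buffers(inode->i_mapping);
}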
783 * Add a page to the dirty page list.
785 * It is a sad fact of life that this function is called from several places
786 * deeply under spinlocking. It may not sleep.
788 * If the page has buffers, the uptodate buffers are set dirty, to preserve
789 dirty-state coherency between the page and the buffers. If the page does
790 * not have buffers then when they are later attached they will all be set
791 * dirty.
793 * The buffers are dirtied before the page is dirtied. There's a small race
794 * window in which a writepage caller may see the page cleanness but not the
795 * buffer dirtiness. That's fine. If this code were to set the page dirty
796 * before the buffers, a concurrent writepage caller could clear the page dirty
797 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
798 * page on the dirty page list.
800 * There is also a small window where the page is dirty, and not on dirty_pages.
801 * Also a possibility that by the time the page is added to dirty_pages, it has
802 * been set clean. The page lists are somewhat approximate in this regard.
803 * It's better to have clean pages accidentally attached to dirty_pages than to
804 * leave dirty pages attached to clean_pages.
806 * We use private_lock to lock against try_to_free_buffers while using the
807 * page's buffer list. Also use this to protect against clean buffers being
808 * added to the page after it was set dirty.
810 * FIXME: may need to call ->reservepage here as well. That's rather up to the
811 * address_space though.
813 * For now, we treat swapper_space specially. It doesn't use the normal
814 * block a_ops.
816 int __set_page_dirty_buffers(struct page *page)
818 struct address_space * const mapping = page->mapping;
819 int ret = 0;
821 if (mapping == NULL) {
822 SetPageDirty(page);
823 goto out;
826 spin_lock(&mapping->private_lock);
827 if (page_has_buffers(page)) {
828 struct buffer_head *head = page_buffers(page);
829 struct buffer_head *bh = head;
831 do {
832 if (buffer_uptodate(bh))
833 set_buffer_dirty(bh);
834 else
835 buffer_error();
836 bh = bh->b_this_page;
837 } while (bh != head);
839 spin_unlock(&mapping->private_lock);
841 if (!TestSetPageDirty(page)) {
842 spin_lock(&mapping->page_lock);
843 if (page->mapping) { /* Race with truncate? */
844 if (!mapping->backing_dev_info->memory_backed)
845 inc_page_state(nr_dirty);
846 list_del(&page->list);
847 list_add(&page->list, &mapping->dirty_pages);
849 spin_unlock(&mapping->page_lock);
850 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
853 out:
854 return ret;
856 EXPORT_SYMBOL(__set_page_dirty_buffers);
859 * Write out and wait upon a list of buffers.
861 * We have conflicting pressures: we want to make sure that all
862 * initially dirty buffers get waited on, but that any subsequently
863 * dirtied buffers don't. After all, we don't want fsync to last
864 * forever if somebody is actively writing to the file.
866 * Do this in two main stages: first we copy dirty buffers to a
867 * temporary inode list, queueing the writes as we go. Then we clean
868 * up, waiting for those writes to complete.
870 * During this second stage, any subsequent updates to the file may end
871 * up refiling the buffer on the original inode's dirty list again, so
872 * there is a chance we will end up with a buffer queued for write but
873 * not yet completed on that list. So, as a final cleanup we go through
874 * the osync code to catch these locked, dirty buffers without requeuing
875 * any newly dirty buffers for write.
877 int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
879 struct buffer_head *bh;
880 struct list_head tmp;
881 int err = 0, err2;
883 INIT_LIST_HEAD(&tmp);
885 spin_lock(lock);
886 while (!list_empty(list)) {
887 bh = BH_ENTRY(list->next);
888 list_del_init(&bh->b_assoc_buffers);
889 if (buffer_dirty(bh) || buffer_locked(bh)) {
890 list_add(&bh->b_assoc_buffers, &tmp);
891 if (buffer_dirty(bh)) {
892 get_bh(bh);
893 spin_unlock(lock);
895 * Ensure any pending I/O completes so that
896 * ll_rw_block() actually writes the current
897 * contents - it is a noop if I/O is still in
898 * flight on potentially older contents.
900 wait_on_buffer(bh);
901 ll_rw_block(WRITE, 1, &bh);
902 brelse(bh);
903 spin_lock(lock);
908 while (!list_empty(&tmp)) {
909 bh = BH_ENTRY(tmp.prev);
910 __remove_assoc_queue(bh);
911 get_bh(bh);
912 spin_unlock(lock);
913 wait_on_buffer(bh);
914 if (!buffer_uptodate(bh))
915 err = -EIO;
916 brelse(bh);
917 spin_lock(lock);
920 spin_unlock(lock);
921 err2 = osync_buffers_list(lock, list);
922 if (err)
923 return err;
924 else
925 return err2;
929 * Invalidate any and all dirty buffers on a given inode. We are
930 * probably unmounting the fs, but that doesn't mean we have already
931 * done a sync(). Just drop the buffers from the inode list.
933 * NOTE: we take the inode's blockdev's mapping's private_lock. Which
934 * assumes that all the buffers are against the blockdev. Not true
935 * for reiserfs.
937 void invalidate_inode_buffers(struct inode *inode)
939 if (inode_has_buffers(inode)) {
940 struct address_space *mapping = &inode->i_data;
941 struct list_head *list = &mapping->private_list;
942 struct address_space *buffer_mapping = mapping->assoc_mapping;
944 spin_lock(&buffer_mapping->private_lock);
945 while (!list_empty(list))
946 __remove_assoc_queue(BH_ENTRY(list->next));
947 spin_unlock(&buffer_mapping->private_lock);
952 * Remove any clean buffers from the inode's buffer list. This is called
953 * when we're trying to free the inode itself. Those buffers can pin it.
955 * Returns true if all buffers were removed.
957 int remove_inode_buffers(struct inode *inode)
959 int ret = 1;
961 if (inode_has_buffers(inode)) {
962 struct address_space *mapping = &inode->i_data;
963 struct list_head *list = &mapping->private_list;
964 struct address_space *buffer_mapping = mapping->assoc_mapping;
966 spin_lock(&buffer_mapping->private_lock);
967 while (!list_empty(list)) {
968 struct buffer_head *bh = BH_ENTRY(list->next);
969 if (buffer_dirty(bh)) {
970 ret = 0;
971 break;
973 __remove_assoc_queue(bh);
975 spin_unlock(&buffer_mapping->private_lock);
977 return ret;
981 * Create the appropriate buffers when given a page for the data area and
982 * the size of each buffer. Use the bh->b_this_page linked list to
983 * follow the buffers created. Return NULL if unable to create more
984 * buffers.
986 * The retry flag is used to differentiate async IO (paging, swapping)
987 * which may not fail from ordinary buffer allocations.
989 static struct buffer_head *
990 create_buffers(struct page * page, unsigned long size, int retry)
992 struct buffer_head *bh, *head;
993 long offset;
995 try_again:
996 head = NULL;
997 offset = PAGE_SIZE;
998 while ((offset -= size) >= 0) {
999 bh = alloc_buffer_head(GFP_NOFS);
1000 if (!bh)
1001 goto no_grow;
1003 bh->b_bdev = NULL;
1004 bh->b_this_page = head;
1005 bh->b_blocknr = -1;
1006 head = bh;
1008 bh->b_state = 0;
1009 atomic_set(&bh->b_count, 0);
1010 bh->b_size = size;
1012 /* Link the buffer to its page */
1013 set_bh_page(bh, page, offset);
1015 bh->b_end_io = NULL;
1017 return head;
1019 * In case anything failed, we just free everything we got.
1021 no_grow:
1022 if (head) {
1023 do {
1024 bh = head;
1025 head = head->b_this_page;
1026 free_buffer_head(bh);
1027 } while (head);
1031 * Return failure for non-async IO requests. Async IO requests
1032 * are not allowed to fail, so we have to wait until buffer heads
1033 * become available. But we don't want tasks sleeping with
1034 * partially complete buffers, so all were released above.
1036 if (!retry)
1037 return NULL;
1039 /* We're _really_ low on memory. Now we just
1040 * wait for old buffer heads to become free due to
1041 * finishing IO. Since this is an async request and
1042 * the reserve list is empty, we're sure there are
1043 * async buffer heads in use.
1045 free_more_memory();
1046 goto try_again;
1049 static inline void
1050 link_dev_buffers(struct page *page, struct buffer_head *head)
1052 struct buffer_head *bh, *tail;
1054 bh = head;
1055 do {
1056 tail = bh;
1057 bh = bh->b_this_page;
1058 } while (bh);
1059 tail->b_this_page = head;
1060 __set_page_buffers(page, head);
1064 * Initialise the state of a blockdev page's buffers.
1066 static void
1067 init_page_buffers(struct page *page, struct block_device *bdev,
1068 int block, int size)
1070 struct buffer_head *head = page_buffers(page);
1071 struct buffer_head *bh = head;
1072 unsigned int b_state;
1074 b_state = 1 << BH_Mapped;
1075 if (PageUptodate(page))
1076 b_state |= 1 << BH_Uptodate;
1078 do {
1079 if (!(bh->b_state & (1 << BH_Mapped))) {
1080 init_buffer(bh, NULL, NULL);
1081 bh->b_bdev = bdev;
1082 bh->b_blocknr = block;
1083 bh->b_state = b_state;
1085 block++;
1086 bh = bh->b_this_page;
1087 } while (bh != head);
1091 * Create the page-cache page that contains the requested block.
1093 * This is used purely for blockdev mappings.
1095 static struct page *
1096 grow_dev_page(struct block_device *bdev, unsigned long block,
1097 unsigned long index, int size)
1099 struct inode *inode = bdev->bd_inode;
1100 struct page *page;
1101 struct buffer_head *bh;
1103 page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
1104 if (!page)
1105 return NULL;
1107 if (!PageLocked(page))
1108 BUG();
1110 if (page_has_buffers(page)) {
1111 bh = page_buffers(page);
1112 if (bh->b_size == size)
1113 return page;
1114 if (!try_to_free_buffers(page))
1115 goto failed;
1119 * Allocate some buffers for this page
1121 bh = create_buffers(page, size, 0);
1122 if (!bh)
1123 goto failed;
1126 * Link the page to the buffers and initialise them. Take the
1127 * lock to be atomic wrt __find_get_block(), which does not
1128 * run under the page lock.
1130 spin_lock(&inode->i_mapping->private_lock);
1131 link_dev_buffers(page, bh);
1132 init_page_buffers(page, bdev, block, size);
1133 spin_unlock(&inode->i_mapping->private_lock);
1134 return page;
1136 failed:
1137 buffer_error();
1138 unlock_page(page);
1139 page_cache_release(page);
1140 return NULL;
1144 * Create buffers for the specified block device block's page. If
1145 * that page was dirty, the buffers are set dirty also.
1147 * Except that's a bug. Attaching dirty buffers to a dirty
1148 * blockdev's page can result in filesystem corruption, because
1149 * some of those buffers may be aliases of filesystem data.
1150 * grow_dev_page() will go BUG() if this happens.
1152 static inline int
1153 grow_buffers(struct block_device *bdev, unsigned long block, int size)
1155 struct page *page;
1156 unsigned long index;
1157 int sizebits;
1159 /* Size must be multiple of hard sectorsize */
1160 if (size & (bdev_hardsect_size(bdev)-1))
1161 BUG();
1162 if (size < 512 || size > PAGE_SIZE)
1163 BUG();
1165 sizebits = -1;
1166 do {
1167 sizebits++;
1168 } while ((size << sizebits) < PAGE_SIZE);
1170 index = block >> sizebits;
1171 block = index << sizebits;
1173 /* Create a page with the proper size buffers.. */
1174 page = grow_dev_page(bdev, block, index, size);
1175 if (!page)
1176 return 0;
1177 unlock_page(page);
1178 page_cache_release(page);
1179 return 1;
1182 struct buffer_head *
1183 __getblk_slow(struct block_device *bdev, sector_t block, int size)
1185 for (;;) {
1186 struct buffer_head * bh;
1188 bh = __find_get_block(bdev, block, size);
1189 if (bh)
1190 return bh;
1192 if (!grow_buffers(bdev, block, size))
1193 free_more_memory();
1198 * The relationship between dirty buffers and dirty pages:
1200 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1201 * the page appears on its address_space.dirty_pages list.
1203 * At all times, the dirtiness of the buffers represents the dirtiness of
1204 * subsections of the page. If the page has buffers, the page dirty bit is
1205 * merely a hint about the true dirty state.
1207 * When a page is set dirty in its entirety, all its buffers are marked dirty
1208 * (if the page has buffers).
1210 * When a buffer is marked dirty, its page is dirtied, but the page's other
1211 * buffers are not.
1213 * Also. When blockdev buffers are explicitly read with bread(), they
1214 * individually become uptodate. But their backing page remains not
1215 * uptodate - even if all of its buffers are uptodate. A subsequent
1216 * block_read_full_page() against that page will discover all the uptodate
1217 * buffers, will set the page uptodate and will perform no I/O.
1221 * mark_buffer_dirty - mark a buffer_head as needing writeout
1223 * mark_buffer_dirty() will set the dirty bit against the buffer,
1224 * then set its backing page dirty, then attach the page to its
1225 * address_space's dirty_pages list and then attach the address_space's
1226 * inode to its superblock's dirty inode list.
1228 * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
1229 * mapping->page_lock and the global inode_lock.
1231 void mark_buffer_dirty(struct buffer_head *bh)
1233 if (!buffer_uptodate(bh))
1234 buffer_error();
1235 if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
1236 __set_page_dirty_nobuffers(bh->b_page);
1240 * Decrement a buffer_head's reference count. If all buffers against a page
1241 * have zero reference count, are clean and unlocked, and if the page is clean
1242 * and unlocked then try_to_free_buffers() may strip the buffers from the page
1243 * in preparation for freeing it (sometimes, rarely, buffers are removed from
1244 * a page but it ends up not being freed, and buffers may later be reattached).
1246 void __brelse(struct buffer_head * buf)
1248 if (atomic_read(&buf->b_count)) {
1249 put_bh(buf);
1250 return;
1252 printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1253 buffer_error(); /* For the stack backtrace */
1257 * bforget() is like brelse(), except it discards any
1258 * potentially dirty data.
1260 void __bforget(struct buffer_head *bh)
1262 clear_buffer_dirty(bh);
1263 if (!list_empty(&bh->b_assoc_buffers)) {
1264 struct address_space *buffer_mapping = bh->b_page->mapping;
1266 spin_lock(&buffer_mapping->private_lock);
1267 list_del_init(&bh->b_assoc_buffers);
1268 spin_unlock(&buffer_mapping->private_lock);
1270 __brelse(bh);
1273 static struct buffer_head *__bread_slow(struct buffer_head *bh)
1275 lock_buffer(bh);
1276 if (buffer_uptodate(bh)) {
1277 unlock_buffer(bh);
1278 return bh;
1279 } else {
1280 if (buffer_dirty(bh))
1281 buffer_error();
1282 get_bh(bh);
1283 bh->b_end_io = end_buffer_io_sync;
1284 submit_bh(READ, bh);
1285 wait_on_buffer(bh);
1286 if (buffer_uptodate(bh))
1287 return bh;
1289 brelse(bh);
1290 return NULL;
1294 * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block().
1295 * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their
1296 * refcount elevated by one when they're in an LRU. A buffer can only appear
1297 * once in a particular CPU's LRU. A single buffer can be present in multiple
1298 * CPU's LRUs at the same time.
1300 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1301 * sb_find_get_block().
1303 * The LRUs themselves only need locking against invalidate_bh_lrus. We use
1304 * a local interrupt disable for that.
1307 #define BH_LRU_SIZE 8
1309 struct bh_lru {
1310 struct buffer_head *bhs[BH_LRU_SIZE];
1313 static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{0}};
1315 #ifdef CONFIG_SMP
1316 #define bh_lru_lock() local_irq_disable()
1317 #define bh_lru_unlock() local_irq_enable()
1318 #else
1319 #define bh_lru_lock() preempt_disable()
1320 #define bh_lru_unlock() preempt_enable()
1321 #endif
1323 static inline void check_irqs_on(void)
1325 #ifdef irqs_disabled
1326 BUG_ON(irqs_disabled());
1327 #endif
1331 * The LRU management algorithm is dopey-but-simple. Sorry.
1333 static void bh_lru_install(struct buffer_head *bh)
1335 struct buffer_head *evictee = NULL;
1336 struct bh_lru *lru;
1338 check_irqs_on();
1339 bh_lru_lock();
1340 lru = &per_cpu(bh_lrus, smp_processor_id());
1341 if (lru->bhs[0] != bh) {
1342 struct buffer_head *bhs[BH_LRU_SIZE];
1343 int in;
1344 int out = 0;
1346 get_bh(bh);
1347 bhs[out++] = bh;
1348 for (in = 0; in < BH_LRU_SIZE; in++) {
1349 struct buffer_head *bh2 = lru->bhs[in];
1351 if (bh2 == bh) {
1352 __brelse(bh2);
1353 } else {
1354 if (out >= BH_LRU_SIZE) {
1355 BUG_ON(evictee != NULL);
1356 evictee = bh2;
1357 } else {
1358 bhs[out++] = bh2;
1362 while (out < BH_LRU_SIZE)
1363 bhs[out++] = NULL;
1364 memcpy(lru->bhs, bhs, sizeof(bhs));
1366 bh_lru_unlock();
1368 if (evictee)
1369 __brelse(evictee);
1373 * Look up the bh in this cpu's LRU. If it's there, move it to the head.
1375 static inline struct buffer_head *
1376 lookup_bh_lru(struct block_device *bdev, sector_t block, int size)
1378 struct buffer_head *ret = NULL;
1379 struct bh_lru *lru;
1380 int i;
1382 check_irqs_on();
1383 bh_lru_lock();
1384 lru = &per_cpu(bh_lrus, smp_processor_id());
1385 for (i = 0; i < BH_LRU_SIZE; i++) {
1386 struct buffer_head *bh = lru->bhs[i];
1388 if (bh && bh->b_bdev == bdev &&
1389 bh->b_blocknr == block && bh->b_size == size) {
1390 if (i) {
1391 while (i) {
1392 lru->bhs[i] = lru->bhs[i - 1];
1393 i--;
1395 lru->bhs[0] = bh;
1397 get_bh(bh);
1398 ret = bh;
1399 break;
1402 bh_lru_unlock();
1403 return ret;
1407 * Perform a pagecache lookup for the matching buffer. If it's there, refresh
1408 * it in the LRU and mark it as accessed. If it is not present then return
1409 * NULL
1411 struct buffer_head *
1412 __find_get_block(struct block_device *bdev, sector_t block, int size)
1414 struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1416 if (bh == NULL) {
1417 bh = __find_get_block_slow(bdev, block, size);
1418 if (bh)
1419 bh_lru_install(bh);
1421 if (bh)
1422 touch_buffer(bh);
1423 return bh;
1425 EXPORT_SYMBOL(__find_get_block);
1428 * __getblk will locate (and, if necessary, create) the buffer_head
1429 * which corresponds to the passed block_device, block and size. The
1430 * returned buffer has its reference count incremented.
1432 * __getblk() cannot fail - it just keeps trying. If you pass it an
1433 * illegal block number, __getblk() will happily return a buffer_head
1434 * which represents the non-existent block. Very weird.
1436 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1437 * attempt is failing. FIXME, perhaps?
1439 struct buffer_head *
1440 __getblk(struct block_device *bdev, sector_t block, int size)
1442 struct buffer_head *bh = __find_get_block(bdev, block, size);
1444 if (bh == NULL)
1445 bh = __getblk_slow(bdev, block, size);
1446 return bh;
1448 EXPORT_SYMBOL(__getblk);
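/*
 * Illustrative sketch (not part of the original file): initialising a block
 * which will be completely overwritten.  Because __getblk() cannot fail, the
 * buffer can be filled and marked dirty without first reading the old
 * contents from disk.  example_init_block() is a hypothetical name.
 */
static void example_init_block(struct block_device *bdev, sector_t block,
				int size)
{
	struct buffer_head *bh = __getblk(bdev, block, size);

	lock_buffer(bh);
	memset(bh->b_data, 0, bh->b_size);
	set_buffer_uptodate(bh);	/* contents are now fully valid in memory */
	mark_buffer_dirty(bh);		/* schedule the block for writeback */
	unlock_buffer(bh);
	brelse(bh);
}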
1451 * __bread() - reads a specified block and returns the bh
1452 * @block: number of block
1453 * @size: size (in bytes) to read
1455 * Reads a specified block, and returns buffer head that contains it.
1456 * It returns NULL if the block was unreadable.
1458 struct buffer_head *
1459 __bread(struct block_device *bdev, sector_t block, int size)
1461 struct buffer_head *bh = __getblk(bdev, block, size);
1463 if (!buffer_uptodate(bh))
1464 bh = __bread_slow(bh);
1465 return bh;
1467 EXPORT_SYMBOL(__bread);
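/*
 * Illustrative sketch (not part of the original file): the usual
 * read-modify-write cycle built on __bread() via the sb_bread() wrapper.
 * The buffer comes back uptodate (or NULL on I/O error), is modified in
 * memory, marked dirty for later writeback and released.  The example_*
 * name and the byte being patched are hypothetical.
 */
static int example_update_block(struct super_block *sb, sector_t block)
{
	struct buffer_head *bh = sb_bread(sb, block);

	if (!bh)
		return -EIO;		/* the block was unreadable */
	bh->b_data[0] = 0;		/* modify the cached contents */
	mark_buffer_dirty(bh);		/* written back later by the VM/bdflush */
	brelse(bh);
	return 0;
}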
1470 * invalidate_bh_lrus() is called rarely - at unmount. Because it is only for
1471 * unmount it only needs to ensure that all buffers from the target device are
1472 * invalidated on return and it doesn't need to worry about new buffers from
1473 * that device being added - the unmount code has to prevent that.
1475 static void invalidate_bh_lru(void *arg)
1477 const int cpu = get_cpu();
1478 struct bh_lru *b = &per_cpu(bh_lrus, cpu);
1479 int i;
1481 for (i = 0; i < BH_LRU_SIZE; i++) {
1482 brelse(b->bhs[i]);
1483 b->bhs[i] = NULL;
1485 put_cpu();
1488 static void invalidate_bh_lrus(void)
1490 on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
1493 void set_bh_page(struct buffer_head *bh,
1494 struct page *page, unsigned long offset)
1496 bh->b_page = page;
1497 if (offset >= PAGE_SIZE)
1498 BUG();
1499 if (PageHighMem(page))
1501 * This catches illegal uses and preserves the offset:
1503 bh->b_data = (char *)(0 + offset);
1504 else
1505 bh->b_data = page_address(page) + offset;
1507 EXPORT_SYMBOL(set_bh_page);
1510 * Called when truncating a buffer on a page completely.
1512 static inline void discard_buffer(struct buffer_head * bh)
1514 lock_buffer(bh);
1515 clear_buffer_dirty(bh);
1516 bh->b_bdev = NULL;
1517 clear_buffer_mapped(bh);
1518 clear_buffer_req(bh);
1519 clear_buffer_new(bh);
1520 clear_buffer_delay(bh);
1521 unlock_buffer(bh);
1525 * try_to_release_page() - release old fs-specific metadata on a page
1527 * @page: the page which the kernel is trying to free
1528 * @gfp_mask: memory allocation flags (and I/O mode)
1530 * The address_space is to try to release any data against the page
1531 * (presumably at page->private). If the release was successful, return `1'.
1532 * Otherwise return zero.
1534 * The @gfp_mask argument specifies whether I/O may be performed to release
1535 * this page (__GFP_IO), and whether the call may block (__GFP_WAIT).
1537 * NOTE: @gfp_mask may go away, and this function may become non-blocking.
1539 int try_to_release_page(struct page *page, int gfp_mask)
1541 struct address_space * const mapping = page->mapping;
1543 if (!PageLocked(page))
1544 BUG();
1545 if (PageWriteback(page))
1546 return 0;
1548 if (mapping && mapping->a_ops->releasepage)
1549 return mapping->a_ops->releasepage(page, gfp_mask);
1550 return try_to_free_buffers(page);
1554 * block_invalidatepage - invalidate part or all of a buffer-backed page
1556 * @page: the page which is affected
1557 * @offset: the index of the truncation point
1559 * block_invalidatepage() is called when all or part of the page has become
1560 * invalidated by a truncate operation.
1562 * block_invalidatepage() does not have to release all buffers, but it must
1563 * ensure that no dirty buffer is left outside @offset and that no I/O
1564 * is underway against any of the blocks which are outside the truncation
1565 * point, because the caller is about to free (and possibly reuse) those
1566 * blocks on disk.
1568 int block_invalidatepage(struct page *page, unsigned long offset)
1570 struct buffer_head *head, *bh, *next;
1571 unsigned int curr_off = 0;
1572 int ret = 1;
1574 BUG_ON(!PageLocked(page));
1575 if (!page_has_buffers(page))
1576 goto out;
1578 head = page_buffers(page);
1579 bh = head;
1580 do {
1581 unsigned int next_off = curr_off + bh->b_size;
1582 next = bh->b_this_page;
1585 * is this block fully invalidated?
1587 if (offset <= curr_off)
1588 discard_buffer(bh);
1589 curr_off = next_off;
1590 bh = next;
1591 } while (bh != head);
1594 * We release buffers only if the entire page is being invalidated.
1595 * The get_block cached value has been unconditionally invalidated,
1596 * so real IO is not possible anymore.
1598 if (offset == 0)
1599 ret = try_to_release_page(page, 0);
1600 out:
1601 return ret;
1603 EXPORT_SYMBOL(block_invalidatepage);
1606 * We attach and possibly dirty the buffers atomically wrt
1607 * __set_page_dirty_buffers() via private_lock. try_to_free_buffers
1608 * is already excluded via the page lock.
1610 void create_empty_buffers(struct page *page,
1611 unsigned long blocksize, unsigned long b_state)
1613 struct buffer_head *bh, *head, *tail;
1615 head = create_buffers(page, blocksize, 1);
1616 bh = head;
1617 do {
1618 bh->b_state |= b_state;
1619 tail = bh;
1620 bh = bh->b_this_page;
1621 } while (bh);
1622 tail->b_this_page = head;
1624 spin_lock(&page->mapping->private_lock);
1625 if (PageUptodate(page) || PageDirty(page)) {
1626 bh = head;
1627 do {
1628 if (PageDirty(page))
1629 set_buffer_dirty(bh);
1630 if (PageUptodate(page))
1631 set_buffer_uptodate(bh);
1632 bh = bh->b_this_page;
1633 } while (bh != head);
1635 __set_page_buffers(page, head);
1636 spin_unlock(&page->mapping->private_lock);
1638 EXPORT_SYMBOL(create_empty_buffers);
1641 * We are taking a block for data and we don't want any output from any
1642 * buffer-cache aliases starting from return from that function and
1643 * until the moment when something will explicitly mark the buffer
1644 * dirty (hopefully that will not happen until we free that block ;-)
1645 * We don't even need to mark it not-uptodate - nobody can expect
1646 * anything from a newly allocated buffer anyway. We used to use
1647 * unmap_buffer() for such invalidation, but that was wrong. We definitely
1648 * don't want to mark the alias unmapped, for example - it would confuse
1649 * anyone who might pick it with bread() afterwards...
1651 * Also.. Note that bforget() doesn't lock the buffer. So there can
1652 * be writeout I/O going on against recently-freed buffers. We don't
1653 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1654 * only if we really need to. That happens here.
1656 void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1658 struct buffer_head *old_bh;
1660 old_bh = __find_get_block_slow(bdev, block, 0);
1661 if (old_bh) {
1662 #if 0 /* This happens. Later. */
1663 if (buffer_dirty(old_bh))
1664 buffer_error();
1665 #endif
1666 clear_buffer_dirty(old_bh);
1667 wait_on_buffer(old_bh);
1668 clear_buffer_req(old_bh);
1669 __brelse(old_bh);
1672 EXPORT_SYMBOL(unmap_underlying_metadata);
1675 * NOTE! All mapped/uptodate combinations are valid:
1677 * Mapped Uptodate Meaning
1679 * No No "unknown" - must do get_block()
1680 * No Yes "hole" - zero-filled
1681 * Yes No "allocated" - allocated on disk, not read in
1682 * Yes Yes "valid" - allocated and up-to-date in memory.
1684 * "Dirty" is valid only with the last case (mapped+uptodate).
1688 * While block_write_full_page is writing back the dirty buffers under
1689 * the page lock, whoever dirtied the buffers may decide to clean them
1690 * again at any time. We handle that by only looking at the buffer
1691 * state inside lock_buffer().
1693 * If block_write_full_page() is called for regular writeback
1694 * (called_for_sync() is false) then it will redirty a page which has a locked
1695 * buffer. This only can happen if someone has written the buffer directly,
1696 * with submit_bh(). At the address_space level PageWriteback prevents this
1697 * contention from occurring.
1699 static int __block_write_full_page(struct inode *inode, struct page *page,
1700 get_block_t *get_block, struct writeback_control *wbc)
1702 int err;
1703 unsigned long block;
1704 unsigned long last_block;
1705 struct buffer_head *bh, *head;
1706 int nr_underway = 0;
1708 BUG_ON(!PageLocked(page));
1710 last_block = (inode->i_size - 1) >> inode->i_blkbits;
1712 if (!page_has_buffers(page)) {
1713 if (!PageUptodate(page))
1714 buffer_error();
1715 create_empty_buffers(page, 1 << inode->i_blkbits,
1716 (1 << BH_Dirty)|(1 << BH_Uptodate));
1720 * Be very careful. We have no exclusion from __set_page_dirty_buffers
1721 * here, and the (potentially unmapped) buffers may become dirty at
1722 * any time. If a buffer becomes dirty here after we've inspected it
1723 * then we just miss that fact, and the page stays dirty.
1725 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1726 * handle that here by just cleaning them.
1729 block = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1730 head = page_buffers(page);
1731 bh = head;
1734 * Get all the dirty buffers mapped to disk addresses and
1735 * handle any aliases from the underlying blockdev's mapping.
1737 do {
1738 if (block > last_block) {
1740 * mapped buffers outside i_size will occur, because
1741 * this page can be outside i_size when there is a
1742 * truncate in progress.
1744 * if (buffer_mapped(bh))
1745 * buffer_error();
1748 * The buffer was zeroed by block_write_full_page()
1750 clear_buffer_dirty(bh);
1751 set_buffer_uptodate(bh);
1752 } else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
1753 if (buffer_new(bh))
1754 buffer_error();
1755 err = get_block(inode, block, bh, 1);
1756 if (err)
1757 goto recover;
1758 if (buffer_new(bh)) {
1759 /* blockdev mappings never come here */
1760 clear_buffer_new(bh);
1761 unmap_underlying_metadata(bh->b_bdev,
1762 bh->b_blocknr);
1765 bh = bh->b_this_page;
1766 block++;
1767 } while (bh != head);
1769 do {
1770 get_bh(bh);
1771 if (buffer_mapped(bh) && buffer_dirty(bh)) {
1772 if (wbc->sync_mode != WB_SYNC_NONE) {
1773 lock_buffer(bh);
1774 } else {
1775 if (test_set_buffer_locked(bh)) {
1776 __set_page_dirty_nobuffers(page);
1777 continue;
1780 if (test_clear_buffer_dirty(bh)) {
1781 if (!buffer_uptodate(bh))
1782 buffer_error();
1783 mark_buffer_async_write(bh);
1784 } else {
1785 unlock_buffer(bh);
1788 } while ((bh = bh->b_this_page) != head);
1790 BUG_ON(PageWriteback(page));
1791 SetPageWriteback(page); /* Keeps try_to_free_buffers() away */
1792 unlock_page(page);
1795 * The page may come unlocked any time after the *first* submit_bh()
1796 * call. Be careful with its buffers.
1798 do {
1799 struct buffer_head *next = bh->b_this_page;
1800 if (buffer_async_write(bh)) {
1801 submit_bh(WRITE, bh);
1802 nr_underway++;
1804 put_bh(bh);
1805 bh = next;
1806 } while (bh != head);
1808 err = 0;
1809 done:
1810 if (nr_underway == 0) {
1812 * The page was marked dirty, but the buffers were
1813 * clean. Someone wrote them back by hand with
1814 * ll_rw_block/submit_bh. A rare case.
1816 int uptodate = 1;
1817 do {
1818 if (!buffer_uptodate(bh)) {
1819 uptodate = 0;
1820 break;
1822 bh = bh->b_this_page;
1823 } while (bh != head);
1824 if (uptodate)
1825 SetPageUptodate(page);
1826 end_page_writeback(page);
1828 return err;
1830 recover:
1832 * ENOSPC, or some other error. We may already have added some
1833 * blocks to the file, so we need to write these out to avoid
1834 * exposing stale data.
1835 * The page is currently locked and not marked for writeback
1837 bh = head;
1838 /* Recovery: lock and submit the mapped buffers */
1839 do {
1840 get_bh(bh);
1841 if (buffer_mapped(bh) && buffer_dirty(bh)) {
1842 lock_buffer(bh);
1843 mark_buffer_async_write(bh);
1844 } else {
1846 * The buffer may have been set dirty during
1847 * attachment to a dirty page.
1849 clear_buffer_dirty(bh);
1851 } while ((bh = bh->b_this_page) != head);
1852 SetPageError(page);
1853 BUG_ON(PageWriteback(page));
1854 SetPageWriteback(page);
1855 unlock_page(page);
1856 do {
1857 struct buffer_head *next = bh->b_this_page;
1858 if (buffer_async_write(bh)) {
1859 clear_buffer_dirty(bh);
1860 submit_bh(WRITE, bh);
1861 nr_underway++;
1863 put_bh(bh);
1864 bh = next;
1865 } while (bh != head);
1866 goto done;
1869 static int __block_prepare_write(struct inode *inode, struct page *page,
1870 unsigned from, unsigned to, get_block_t *get_block)
1872 unsigned block_start, block_end;
1873 sector_t block;
1874 int err = 0;
1875 unsigned blocksize, bbits;
1876 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1878 BUG_ON(!PageLocked(page));
1879 BUG_ON(from > PAGE_CACHE_SIZE);
1880 BUG_ON(to > PAGE_CACHE_SIZE);
1881 BUG_ON(from > to);
1883 blocksize = 1 << inode->i_blkbits;
1884 if (!page_has_buffers(page))
1885 create_empty_buffers(page, blocksize, 0);
1886 head = page_buffers(page);
1888 bbits = inode->i_blkbits;
1889 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1891 for(bh = head, block_start = 0; bh != head || !block_start;
1892 block++, block_start=block_end, bh = bh->b_this_page) {
1893 block_end = block_start + blocksize;
1894 if (block_end <= from || block_start >= to) {
1895 if (PageUptodate(page)) {
1896 if (!buffer_uptodate(bh))
1897 set_buffer_uptodate(bh);
1899 continue;
1901 if (buffer_new(bh))
1902 clear_buffer_new(bh);
1903 if (!buffer_mapped(bh)) {
1904 err = get_block(inode, block, bh, 1);
1905 if (err)
1906 goto out;
1907 if (buffer_new(bh)) {
1908 clear_buffer_new(bh);
1909 unmap_underlying_metadata(bh->b_bdev,
1910 bh->b_blocknr);
1911 if (PageUptodate(page)) {
1912 if (!buffer_mapped(bh))
1913 buffer_error();
1914 set_buffer_uptodate(bh);
1915 continue;
1917 if (block_end > to || block_start < from) {
1918 void *kaddr;
1920 kaddr = kmap_atomic(page, KM_USER0);
1921 if (block_end > to)
1922 memset(kaddr+to, 0,
1923 block_end-to);
1924 if (block_start < from)
1925 memset(kaddr+block_start,
1926 0, from-block_start);
1927 flush_dcache_page(page);
1928 kunmap_atomic(kaddr, KM_USER0);
1930 continue;
1933 if (PageUptodate(page)) {
1934 if (!buffer_uptodate(bh))
1935 set_buffer_uptodate(bh);
1936 continue;
1938 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1939 (block_start < from || block_end > to)) {
1940 ll_rw_block(READ, 1, &bh);
1941 *wait_bh++=bh;
1945 * If we issued read requests - let them complete.
1947 while(wait_bh > wait) {
1948 wait_on_buffer(*--wait_bh);
1949 if (!buffer_uptodate(*wait_bh))
1950 return -EIO;
1952 return 0;
1953 out:
1955 * Zero out any newly allocated blocks to avoid exposing stale
1956 * data. If BH_New is set, we know that the block was newly
1957 * allocated in the above loop.
1959 bh = head;
1960 block_start = 0;
1961 do {
1962 block_end = block_start+blocksize;
1963 if (block_end <= from)
1964 goto next_bh;
1965 if (block_start >= to)
1966 break;
1967 if (buffer_new(bh)) {
1968 void *kaddr;
1970 clear_buffer_new(bh);
1971 if (buffer_uptodate(bh))
1972 buffer_error();
1973 kaddr = kmap_atomic(page, KM_USER0);
1974 memset(kaddr+block_start, 0, bh->b_size);
1975 kunmap_atomic(kaddr, KM_USER0);
1976 set_buffer_uptodate(bh);
1977 mark_buffer_dirty(bh);
1979 next_bh:
1980 block_start = block_end;
1981 bh = bh->b_this_page;
1982 } while (bh != head);
1983 return err;
1986 static int __block_commit_write(struct inode *inode, struct page *page,
1987 unsigned from, unsigned to)
1989 unsigned block_start, block_end;
1990 int partial = 0;
1991 unsigned blocksize;
1992 struct buffer_head *bh, *head;
1994 blocksize = 1 << inode->i_blkbits;
1996 for(bh = head = page_buffers(page), block_start = 0;
1997 bh != head || !block_start;
1998 block_start=block_end, bh = bh->b_this_page) {
1999 block_end = block_start + blocksize;
2000 if (block_end <= from || block_start >= to) {
2001 if (!buffer_uptodate(bh))
2002 partial = 1;
2003 } else {
2004 set_buffer_uptodate(bh);
2005 mark_buffer_dirty(bh);
2006 }
2007 }
2009 /*
2010 * If this is a partial write which happened to make all buffers
2011 * uptodate then we can optimize away a bogus readpage() for
2012 * the next read(). Here we 'discover' whether the page went
2013 * uptodate as a result of this (potentially partial) write.
2014 */
2015 if (!partial)
2016 SetPageUptodate(page);
2017 return 0;
2018 }
2020 /*
2021 * Generic "read page" function for block devices that have the normal
2022 * get_block functionality. This is most of the block device filesystems.
2023 * Reads the page asynchronously --- the unlock_buffer() and
2024 * set/clear_buffer_uptodate() functions propagate buffer state into the
2025 * page struct once IO has completed.
2026 */
2027 int block_read_full_page(struct page *page, get_block_t *get_block)
2028 {
2029 struct inode *inode = page->mapping->host;
2030 sector_t iblock, lblock;
2031 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2032 unsigned int blocksize;
2033 int nr, i;
2034 int fully_mapped = 1;
2036 if (!PageLocked(page))
2037 PAGE_BUG(page);
2038 if (PageUptodate(page))
2039 buffer_error();
2040 blocksize = 1 << inode->i_blkbits;
2041 if (!page_has_buffers(page))
2042 create_empty_buffers(page, blocksize, 0);
2043 head = page_buffers(page);
2045 iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2046 lblock = (inode->i_size+blocksize-1) >> inode->i_blkbits;
2047 bh = head;
2048 nr = 0;
2049 i = 0;
2051 do {
2052 if (buffer_uptodate(bh))
2053 continue;
2055 if (!buffer_mapped(bh)) {
2056 fully_mapped = 0;
2057 if (iblock < lblock) {
2058 if (get_block(inode, iblock, bh, 0))
2059 SetPageError(page);
2060 }
2061 if (!buffer_mapped(bh)) {
2062 void *kaddr = kmap_atomic(page, KM_USER0);
2063 memset(kaddr + i * blocksize, 0, blocksize);
2064 flush_dcache_page(page);
2065 kunmap_atomic(kaddr, KM_USER0);
2066 set_buffer_uptodate(bh);
2067 continue;
2068 }
2069 /*
2070 * get_block() might have updated the buffer
2071 * synchronously
2072 */
2073 if (buffer_uptodate(bh))
2074 continue;
2075 }
2076 arr[nr++] = bh;
2077 } while (i++, iblock++, (bh = bh->b_this_page) != head);
2079 if (fully_mapped)
2080 SetPageMappedToDisk(page);
2082 if (!nr) {
2083 /*
2084 * All buffers are uptodate - we can set the page uptodate
2085 * as well. But not if get_block() returned an error.
2086 */
2087 if (!PageError(page))
2088 SetPageUptodate(page);
2089 unlock_page(page);
2090 return 0;
2091 }
2093 /* Stage two: lock the buffers */
2094 for (i = 0; i < nr; i++) {
2095 bh = arr[i];
2096 lock_buffer(bh);
2097 mark_buffer_async_read(bh);
2098 }
2100 /*
2101 * Stage 3: start the IO. Check for uptodateness
2102 * inside the buffer lock in case another process reading
2103 * the underlying blockdev brought it uptodate (the sct fix).
2104 */
2105 for (i = 0; i < nr; i++) {
2106 bh = arr[i];
2107 if (buffer_uptodate(bh))
2108 end_buffer_async_read(bh, 1);
2109 else
2110 submit_bh(READ, bh);
2111 }
2112 return 0;
2113 }
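/*
 * Editorial sketch (not part of the original source): a filesystem built on
 * the buffer-head helpers typically implements ->readpage() as a one-line
 * wrapper around block_read_full_page(), supplying only its block-mapping
 * callback. The names myfs_readpage and myfs_get_block are hypothetical.
 *
 *	static int myfs_readpage(struct file *file, struct page *page)
 *	{
 *		return block_read_full_page(page, myfs_get_block);
 *	}
 *
 * myfs_get_block would have the get_block_t signature and is expected to
 * set b_bdev/b_blocknr on the buffer_head it is handed.
 */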
2115 /* utility function for filesystems that need to do work on expanding
2116 * truncates. Uses prepare/commit_write to allow the filesystem to
2117 * deal with the hole.
2118 */
2119 int generic_cont_expand(struct inode *inode, loff_t size)
2120 {
2121 struct address_space *mapping = inode->i_mapping;
2122 struct page *page;
2123 unsigned long index, offset, limit;
2124 int err;
2126 err = -EFBIG;
2127 limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
2128 if (limit != RLIM_INFINITY && size > (loff_t)limit) {
2129 send_sig(SIGXFSZ, current, 0);
2130 goto out;
2132 if (size > inode->i_sb->s_maxbytes)
2133 goto out;
2135 offset = (size & (PAGE_CACHE_SIZE-1)); /* Within page */
2137 /* ugh. in prepare/commit_write, if from==to==start of block, we
2138 ** skip the prepare. make sure we never send an offset for the start
2139 ** of a block
2140 */
2141 if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
2142 offset++;
2144 index = size >> PAGE_CACHE_SHIFT;
2145 err = -ENOMEM;
2146 page = grab_cache_page(mapping, index);
2147 if (!page)
2148 goto out;
2149 err = mapping->a_ops->prepare_write(NULL, page, offset, offset);
2150 if (!err) {
2151 err = mapping->a_ops->commit_write(NULL, page, offset, offset);
2153 unlock_page(page);
2154 page_cache_release(page);
2155 if (err > 0)
2156 err = 0;
2157 out:
2158 return err;
2159 }
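/*
 * Editorial sketch (not part of the original source): generic_cont_expand()
 * is the sort of helper a filesystem would call from its ->setattr() when a
 * truncate grows the file, so the new tail gets zeroed through the
 * prepare/commit_write path above. All myfs_* names are hypothetical.
 *
 *	static int myfs_setattr(struct dentry *dentry, struct iattr *attr)
 *	{
 *		struct inode *inode = dentry->d_inode;
 *
 *		if ((attr->ia_valid & ATTR_SIZE) &&
 *		    attr->ia_size > inode->i_size) {
 *			int err = generic_cont_expand(inode, attr->ia_size);
 *			if (err)
 *				return err;
 *		}
 *		return inode_setattr(inode, attr);
 *	}
 */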
2161 /*
2162 * For moronic filesystems that do not allow holes in files.
2163 * We may have to extend the file.
2164 */
2166 int cont_prepare_write(struct page *page, unsigned offset,
2167 unsigned to, get_block_t *get_block, loff_t *bytes)
2168 {
2169 struct address_space *mapping = page->mapping;
2170 struct inode *inode = mapping->host;
2171 struct page *new_page;
2172 unsigned long pgpos;
2173 long status;
2174 unsigned zerofrom;
2175 unsigned blocksize = 1 << inode->i_blkbits;
2176 void *kaddr;
2178 while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) {
2179 status = -ENOMEM;
2180 new_page = grab_cache_page(mapping, pgpos);
2181 if (!new_page)
2182 goto out;
2183 /* we might sleep */
2184 if (*bytes>>PAGE_CACHE_SHIFT != pgpos) {
2185 unlock_page(new_page);
2186 page_cache_release(new_page);
2187 continue;
2189 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2190 if (zerofrom & (blocksize-1)) {
2191 *bytes |= (blocksize-1);
2192 (*bytes)++;
2194 status = __block_prepare_write(inode, new_page, zerofrom,
2195 PAGE_CACHE_SIZE, get_block);
2196 if (status)
2197 goto out_unmap;
2198 kaddr = kmap_atomic(new_page, KM_USER0);
2199 memset(kaddr+zerofrom, 0, PAGE_CACHE_SIZE-zerofrom);
2200 flush_dcache_page(new_page);
2201 kunmap_atomic(kaddr, KM_USER0);
2202 __block_commit_write(inode, new_page,
2203 zerofrom, PAGE_CACHE_SIZE);
2204 unlock_page(new_page);
2205 page_cache_release(new_page);
2208 if (page->index < pgpos) {
2209 /* completely inside the area */
2210 zerofrom = offset;
2211 } else {
2212 /* page covers the boundary, find the boundary offset */
2213 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2215 /* if we will expand the thing last block will be filled */
2216 if (to > zerofrom && (zerofrom & (blocksize-1))) {
2217 *bytes |= (blocksize-1);
2218 (*bytes)++;
2221 /* starting below the boundary? Nothing to zero out */
2222 if (offset <= zerofrom)
2223 zerofrom = offset;
2225 status = __block_prepare_write(inode, page, zerofrom, to, get_block);
2226 if (status)
2227 goto out1;
2228 if (zerofrom < offset) {
2229 kaddr = kmap_atomic(page, KM_USER0);
2230 memset(kaddr+zerofrom, 0, offset-zerofrom);
2231 flush_dcache_page(page);
2232 kunmap_atomic(kaddr, KM_USER0);
2233 __block_commit_write(inode, page, zerofrom, offset);
2235 return 0;
2236 out1:
2237 ClearPageUptodate(page);
2238 return status;
2240 out_unmap:
2241 ClearPageUptodate(new_page);
2242 unlock_page(new_page);
2243 page_cache_release(new_page);
2244 out:
2245 return status;
2246 }
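/*
 * Editorial sketch (not part of the original source): a filesystem that
 * cannot represent holes points its ->prepare_write() at
 * cont_prepare_write(), passing a per-inode loff_t that tracks how far the
 * file has been initialised on disk (the role FAT's mmu_private field
 * plays). The myfs_* names and the file_tail field are hypothetical.
 *
 *	static int myfs_prepare_write(struct file *file, struct page *page,
 *					unsigned from, unsigned to)
 *	{
 *		return cont_prepare_write(page, from, to, myfs_get_block,
 *					&MYFS_I(page->mapping->host)->file_tail);
 *	}
 */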
2248 int block_prepare_write(struct page *page, unsigned from, unsigned to,
2249 get_block_t *get_block)
2251 struct inode *inode = page->mapping->host;
2252 int err = __block_prepare_write(inode, page, from, to, get_block);
2253 if (err)
2254 ClearPageUptodate(page);
2255 return err;
2258 int block_commit_write(struct page *page, unsigned from, unsigned to)
2260 struct inode *inode = page->mapping->host;
2261 __block_commit_write(inode,page,from,to);
2262 return 0;
2265 int generic_commit_write(struct file *file, struct page *page,
2266 unsigned from, unsigned to)
2268 struct inode *inode = page->mapping->host;
2269 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2270 __block_commit_write(inode,page,from,to);
2271 if (pos > inode->i_size) {
2272 inode->i_size = pos;
2273 mark_inode_dirty(inode);
2274 }
2275 return 0;
2276 }
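/*
 * Editorial sketch (not part of the original source): the usual way these
 * helpers are consumed is through a filesystem's address_space_operations,
 * with block_prepare_write() wrapped to pass the filesystem's get_block and
 * generic_commit_write() used directly. All myfs_* names are hypothetical.
 *
 *	static int myfs_prepare_write(struct file *file, struct page *page,
 *					unsigned from, unsigned to)
 *	{
 *		return block_prepare_write(page, from, to, myfs_get_block);
 *	}
 *
 *	static struct address_space_operations myfs_aops = {
 *		.readpage	= myfs_readpage,
 *		.writepage	= myfs_writepage,
 *		.sync_page	= block_sync_page,
 *		.prepare_write	= myfs_prepare_write,
 *		.commit_write	= generic_commit_write,
 *		.bmap		= myfs_bmap,
 *	};
 */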
2278 /*
2279 * On entry, the page is fully not uptodate.
2280 * On exit the page is fully uptodate in the areas outside (from,to)
2281 */
2282 int nobh_prepare_write(struct page *page, unsigned from, unsigned to,
2283 get_block_t *get_block)
2284 {
2285 struct inode *inode = page->mapping->host;
2286 const unsigned blkbits = inode->i_blkbits;
2287 const unsigned blocksize = 1 << blkbits;
2288 struct buffer_head map_bh;
2289 struct buffer_head *read_bh[MAX_BUF_PER_PAGE];
2290 unsigned block_in_page;
2291 unsigned block_start;
2292 sector_t block_in_file;
2293 char *kaddr;
2294 int nr_reads = 0;
2295 int i;
2296 int ret = 0;
2297 int is_mapped_to_disk = 1;
2298 int dirtied_it = 0;
2300 if (PageMappedToDisk(page))
2301 return 0;
2303 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2304 map_bh.b_page = page;
2306 /*
2307 * We loop across all blocks in the page, whether or not they are
2308 * part of the affected region. This is so we can discover if the
2309 * page is fully mapped-to-disk.
2310 */
2311 for (block_start = 0, block_in_page = 0;
2312 block_start < PAGE_CACHE_SIZE;
2313 block_in_page++, block_start += blocksize) {
2314 unsigned block_end = block_start + blocksize;
2315 int create;
2317 map_bh.b_state = 0;
2318 create = 1;
2319 if (block_start >= to)
2320 create = 0;
2321 ret = get_block(inode, block_in_file + block_in_page,
2322 &map_bh, create);
2323 if (ret)
2324 goto failed;
2325 if (!buffer_mapped(&map_bh))
2326 is_mapped_to_disk = 0;
2327 if (buffer_new(&map_bh))
2328 unmap_underlying_metadata(map_bh.b_bdev,
2329 map_bh.b_blocknr);
2330 if (PageUptodate(page))
2331 continue;
2332 if (buffer_new(&map_bh) || !buffer_mapped(&map_bh)) {
2333 kaddr = kmap_atomic(page, KM_USER0);
2334 if (block_start < from) {
2335 memset(kaddr+block_start, 0, from-block_start);
2336 dirtied_it = 1;
2338 if (block_end > to) {
2339 memset(kaddr + to, 0, block_end - to);
2340 dirtied_it = 1;
2342 flush_dcache_page(page);
2343 kunmap_atomic(kaddr, KM_USER0);
2344 continue;
2346 if (buffer_uptodate(&map_bh))
2347 continue; /* reiserfs does this */
2348 if (block_start < from || block_end > to) {
2349 struct buffer_head *bh = alloc_buffer_head(GFP_NOFS);
2351 if (!bh) {
2352 ret = -ENOMEM;
2353 goto failed;
2355 bh->b_state = map_bh.b_state;
2356 atomic_set(&bh->b_count, 0);
2357 bh->b_this_page = 0;
2358 bh->b_page = page;
2359 bh->b_blocknr = map_bh.b_blocknr;
2360 bh->b_size = blocksize;
2361 bh->b_data = (char *)(long)block_start;
2362 bh->b_bdev = map_bh.b_bdev;
2363 bh->b_private = NULL;
2364 read_bh[nr_reads++] = bh;
2368 if (nr_reads) {
2369 ll_rw_block(READ, nr_reads, read_bh);
2370 for (i = 0; i < nr_reads; i++) {
2371 wait_on_buffer(read_bh[i]);
2372 if (!buffer_uptodate(read_bh[i]))
2373 ret = -EIO;
2374 free_buffer_head(read_bh[i]);
2375 read_bh[i] = NULL;
2377 if (ret)
2378 goto failed;
2381 if (is_mapped_to_disk)
2382 SetPageMappedToDisk(page);
2383 SetPageUptodate(page);
2385 /*
2386 * Setting the page dirty here isn't necessary for the prepare_write
2387 * function - commit_write will do that. But if/when this function is
2388 * used within the pagefault handler to ensure that all mmapped pages
2389 * have backing space in the filesystem, we will need to dirty the page
2390 * if its contents were altered.
2391 */
2392 if (dirtied_it)
2393 set_page_dirty(page);
2395 return 0;
2397 failed:
2398 for (i = 0; i < nr_reads; i++) {
2399 if (read_bh[i])
2400 free_buffer_head(read_bh[i]);
2401 }
2403 /*
2404 * Error recovery is pretty slack. Clear the page and mark it dirty
2405 * so we'll later zero out any blocks which _were_ allocated.
2406 */
2407 kaddr = kmap_atomic(page, KM_USER0);
2408 memset(kaddr, 0, PAGE_CACHE_SIZE);
2409 kunmap_atomic(kaddr, KM_USER0);
2410 SetPageUptodate(page);
2411 set_page_dirty(page);
2412 return ret;
2414 EXPORT_SYMBOL(nobh_prepare_write);
2416 int nobh_commit_write(struct file *file, struct page *page,
2417 unsigned from, unsigned to)
2419 struct inode *inode = page->mapping->host;
2420 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2422 set_page_dirty(page);
2423 if (pos > inode->i_size) {
2424 inode->i_size = pos;
2425 mark_inode_dirty(inode);
2426 }
2427 return 0;
2428 }
2429 EXPORT_SYMBOL(nobh_commit_write);
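/*
 * Editorial sketch (not part of the original source): the nobh_* pair is a
 * drop-in replacement for the prepare/commit helpers above for filesystems
 * that prefer not to leave buffer_heads attached to their data pages
 * (ext2-style "nobh" mounts are the canonical user). The myfs_* names are
 * hypothetical.
 *
 *	static int myfs_nobh_prepare_write(struct file *file, struct page *page,
 *					unsigned from, unsigned to)
 *	{
 *		return nobh_prepare_write(page, from, to, myfs_get_block);
 *	}
 *
 * with ->commit_write pointed directly at nobh_commit_write, and partial
 * last-block zeroing at truncate time handled by nobh_truncate_page() below.
 */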
2431 /*
2432 * This function assumes that ->prepare_write() uses nobh_prepare_write().
2433 */
2434 int nobh_truncate_page(struct address_space *mapping, loff_t from)
2435 {
2436 struct inode *inode = mapping->host;
2437 unsigned blocksize = 1 << inode->i_blkbits;
2438 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2439 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2440 unsigned to;
2441 struct page *page;
2442 struct address_space_operations *a_ops = mapping->a_ops;
2443 char *kaddr;
2444 int ret = 0;
2446 if ((offset & (blocksize - 1)) == 0)
2447 goto out;
2449 ret = -ENOMEM;
2450 page = grab_cache_page(mapping, index);
2451 if (!page)
2452 goto out;
2454 to = (offset + blocksize) & ~(blocksize - 1);
2455 ret = a_ops->prepare_write(NULL, page, offset, to);
2456 if (ret == 0) {
2457 kaddr = kmap_atomic(page, KM_USER0);
2458 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2459 flush_dcache_page(page);
2460 kunmap_atomic(kaddr, KM_USER0);
2461 set_page_dirty(page);
2463 unlock_page(page);
2464 page_cache_release(page);
2465 out:
2466 return ret;
2468 EXPORT_SYMBOL(nobh_truncate_page);
2470 int block_truncate_page(struct address_space *mapping,
2471 loff_t from, get_block_t *get_block)
2473 unsigned long index = from >> PAGE_CACHE_SHIFT;
2474 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2475 unsigned blocksize, iblock, length, pos;
2476 struct inode *inode = mapping->host;
2477 struct page *page;
2478 struct buffer_head *bh;
2479 void *kaddr;
2480 int err;
2482 blocksize = 1 << inode->i_blkbits;
2483 length = offset & (blocksize - 1);
2485 /* Block boundary? Nothing to do */
2486 if (!length)
2487 return 0;
2489 length = blocksize - length;
2490 iblock = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2492 page = grab_cache_page(mapping, index);
2493 err = -ENOMEM;
2494 if (!page)
2495 goto out;
2497 if (!page_has_buffers(page))
2498 create_empty_buffers(page, blocksize, 0);
2500 /* Find the buffer that contains "offset" */
2501 bh = page_buffers(page);
2502 pos = blocksize;
2503 while (offset >= pos) {
2504 bh = bh->b_this_page;
2505 iblock++;
2506 pos += blocksize;
2509 err = 0;
2510 if (!buffer_mapped(bh)) {
2511 err = get_block(inode, iblock, bh, 0);
2512 if (err)
2513 goto unlock;
2514 /* unmapped? It's a hole - nothing to do */
2515 if (!buffer_mapped(bh))
2516 goto unlock;
2519 /* Ok, it's mapped. Make sure it's up-to-date */
2520 if (PageUptodate(page))
2521 set_buffer_uptodate(bh);
2523 if (!buffer_uptodate(bh) && !buffer_delay(bh)) {
2524 err = -EIO;
2525 ll_rw_block(READ, 1, &bh);
2526 wait_on_buffer(bh);
2527 /* Uhhuh. Read error. Complain and punt. */
2528 if (!buffer_uptodate(bh))
2529 goto unlock;
2532 kaddr = kmap_atomic(page, KM_USER0);
2533 memset(kaddr + offset, 0, length);
2534 flush_dcache_page(page);
2535 kunmap_atomic(kaddr, KM_USER0);
2537 mark_buffer_dirty(bh);
2538 err = 0;
2540 unlock:
2541 unlock_page(page);
2542 page_cache_release(page);
2543 out:
2544 return err;
2545 }
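/*
 * Editorial sketch (not part of the original source): block_truncate_page()
 * is normally called from a filesystem's truncate path to zero the tail of
 * the block that now straddles i_size, before the block allocator releases
 * anything beyond it. The myfs_* names are hypothetical.
 *
 *	static void myfs_truncate(struct inode *inode)
 *	{
 *		block_truncate_page(inode->i_mapping, inode->i_size,
 *					myfs_get_block);
 *		myfs_release_blocks_beyond(inode, inode->i_size);
 *	}
 *
 * myfs_release_blocks_beyond stands in for the filesystem's own block
 * deallocation.
 */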
2547 /*
2548 * The generic ->writepage function for buffer-backed address_spaces
2549 */
2550 int block_write_full_page(struct page *page, get_block_t *get_block,
2551 struct writeback_control *wbc)
2552 {
2553 struct inode * const inode = page->mapping->host;
2554 const unsigned long end_index = inode->i_size >> PAGE_CACHE_SHIFT;
2555 unsigned offset;
2556 void *kaddr;
2558 /* Is the page fully inside i_size? */
2559 if (page->index < end_index)
2560 return __block_write_full_page(inode, page, get_block, wbc);
2562 /* Is the page fully outside i_size? (truncate in progress) */
2563 offset = inode->i_size & (PAGE_CACHE_SIZE-1);
2564 if (page->index >= end_index+1 || !offset) {
2565 /*
2566 * The page may have dirty, unmapped buffers. For example,
2567 * they may have been added in ext3_writepage(). Make them
2568 * freeable here, so the page does not leak.
2569 */
2570 block_invalidatepage(page, 0);
2571 unlock_page(page);
2572 return -EIO;
2573 }
2575 /*
2576 * The page straddles i_size. It must be zeroed out on each and every
2577 * writepage invocation because it may be mmapped. "A file is mapped
2578 * in multiples of the page size. For a file that is not a multiple of
2579 * the page size, the remaining memory is zeroed when mapped, and
2580 * writes to that region are not written out to the file."
2581 */
2582 kaddr = kmap_atomic(page, KM_USER0);
2583 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2584 flush_dcache_page(page);
2585 kunmap_atomic(kaddr, KM_USER0);
2586 return __block_write_full_page(inode, page, get_block, wbc);
2587 }
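/*
 * Editorial sketch (not part of the original source): as with ->readpage(),
 * a buffer-backed filesystem's ->writepage() is typically a direct
 * forwarding call. The myfs_* names are hypothetical.
 *
 *	static int myfs_writepage(struct page *page, struct writeback_control *wbc)
 *	{
 *		return block_write_full_page(page, myfs_get_block, wbc);
 *	}
 */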
2589 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2590 get_block_t *get_block)
2591 {
2592 struct buffer_head tmp;
2593 struct inode *inode = mapping->host;
2594 tmp.b_state = 0;
2595 tmp.b_blocknr = 0;
2596 get_block(inode, block, &tmp, 0);
2597 return tmp.b_blocknr;
2598 }
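/*
 * Editorial sketch (not part of the original source): generic_block_bmap()
 * provides a ready-made ->bmap() implementation, used for things like the
 * FIBMAP ioctl and swap-file setup that need a file-block-to-disk-sector
 * mapping. The myfs_* names are hypothetical.
 *
 *	static sector_t myfs_bmap(struct address_space *mapping, sector_t block)
 *	{
 *		return generic_block_bmap(mapping, block, myfs_get_block);
 *	}
 */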
2600 static int end_bio_bh_io_sync(struct bio *bio, unsigned int bytes_done, int err)
2602 struct buffer_head *bh = bio->bi_private;
2604 if (bio->bi_size)
2605 return 1;
2607 bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2608 bio_put(bio);
2609 return 0;
2612 int submit_bh(int rw, struct buffer_head * bh)
2614 struct bio *bio;
2616 BUG_ON(!buffer_locked(bh));
2617 BUG_ON(!buffer_mapped(bh));
2618 BUG_ON(!bh->b_end_io);
2620 if ((rw == READ || rw == READA) && buffer_uptodate(bh))
2621 buffer_error();
2622 if (rw == WRITE && !buffer_uptodate(bh))
2623 buffer_error();
2624 if (rw == READ && buffer_dirty(bh))
2625 buffer_error();
2627 set_buffer_req(bh);
2629 /*
2630 * from here on down, it's all bio -- do the initial mapping,
2631 * submit_bio -> generic_make_request may further map this bio around
2632 */
2633 bio = bio_alloc(GFP_NOIO, 1);
2635 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2636 bio->bi_bdev = bh->b_bdev;
2637 bio->bi_io_vec[0].bv_page = bh->b_page;
2638 bio->bi_io_vec[0].bv_len = bh->b_size;
2639 bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2641 bio->bi_vcnt = 1;
2642 bio->bi_idx = 0;
2643 bio->bi_size = bh->b_size;
2645 bio->bi_end_io = end_bio_bh_io_sync;
2646 bio->bi_private = bh;
2648 return submit_bio(rw, bio);
2649 }
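/*
 * Editorial sketch (not part of the original source): a caller that wants to
 * read a single, already-mapped buffer synchronously can drive submit_bh()
 * directly rather than going through ll_rw_block(). The helper name is
 * hypothetical; note that end_buffer_io_sync() drops the reference taken by
 * get_bh().
 *
 *	static int read_one_bh(struct buffer_head *bh)
 *	{
 *		if (buffer_uptodate(bh))
 *			return 0;
 *		lock_buffer(bh);
 *		if (buffer_uptodate(bh)) {
 *			unlock_buffer(bh);
 *			return 0;
 *		}
 *		get_bh(bh);
 *		bh->b_end_io = end_buffer_io_sync;
 *		submit_bh(READ, bh);
 *		wait_on_buffer(bh);
 *		return buffer_uptodate(bh) ? 0 : -EIO;
 *	}
 */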
2651 /**
2652 * ll_rw_block: low-level access to block devices (DEPRECATED)
2653 * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
2654 * @nr: number of &struct buffer_heads in the array
2655 * @bhs: array of pointers to &struct buffer_head
2656 *
2657 * ll_rw_block() takes an array of pointers to &struct buffer_heads,
2658 * and requests an I/O operation on them, either a %READ or a %WRITE.
2659 * The third %READA option is described in the documentation for
2660 * generic_make_request() which ll_rw_block() calls.
2661 *
2662 * This function drops any buffer that it cannot get a lock on (with the
2663 * BH_Lock state bit), any buffer that appears to be clean when doing a
2664 * write request, and any buffer that appears to be up-to-date when doing
2665 * a read request. Further it marks as clean buffers that are processed for
2666 * writing (the buffer cache won't assume that they are actually clean until
2667 * the buffer gets unlocked).
2668 *
2669 * ll_rw_block sets b_end_io to a simple completion handler that marks
2670 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
2671 * any waiters.
2672 *
2673 * All of the buffers must be for the same device, and must also be a
2674 * multiple of the current approved size for the device.
2675 */
2676 void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2677 {
2678 int i;
2680 for (i = 0; i < nr; i++) {
2681 struct buffer_head *bh = bhs[i];
2683 if (test_set_buffer_locked(bh))
2684 continue;
2686 get_bh(bh);
2687 bh->b_end_io = end_buffer_io_sync;
2688 if (rw == WRITE) {
2689 if (test_clear_buffer_dirty(bh)) {
2690 submit_bh(WRITE, bh);
2691 continue;
2692 }
2693 } else {
2694 if (!buffer_uptodate(bh)) {
2695 submit_bh(rw, bh);
2696 continue;
2697 }
2698 }
2699 unlock_buffer(bh);
2700 put_bh(bh);
2701 }
2702 }
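/*
 * Editorial sketch (not part of the original source): the common calling
 * pattern is to batch-submit a group of buffers and then wait only on the
 * ones the caller really needs; buffers that were already uptodate (for
 * READ) or clean (for WRITE) are simply skipped by ll_rw_block(). The helper
 * name is hypothetical.
 *
 *	static int read_buffers(struct buffer_head *bhs[], int nr)
 *	{
 *		int i;
 *
 *		ll_rw_block(READ, nr, bhs);
 *		for (i = 0; i < nr; i++) {
 *			wait_on_buffer(bhs[i]);
 *			if (!buffer_uptodate(bhs[i]))
 *				return -EIO;
 *		}
 *		return 0;
 *	}
 */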
2704 /*
2705 * For a data-integrity writeout, we need to wait upon any in-progress I/O
2706 * and then start new I/O and then wait upon it.
2707 */
2708 void sync_dirty_buffer(struct buffer_head *bh)
2709 {
2710 WARN_ON(atomic_read(&bh->b_count) < 1);
2711 lock_buffer(bh);
2712 if (test_clear_buffer_dirty(bh)) {
2713 get_bh(bh);
2714 bh->b_end_io = end_buffer_io_sync;
2715 submit_bh(WRITE, bh);
2716 wait_on_buffer(bh);
2717 } else {
2718 unlock_buffer(bh);
2719 }
2720 }
2722 /*
2723 * Sanity checks for try_to_free_buffers.
2724 */
2725 static void check_ttfb_buffer(struct page *page, struct buffer_head *bh)
2726 {
2727 if (!buffer_uptodate(bh) && !buffer_req(bh)) {
2728 if (PageUptodate(page) && page->mapping
2729 && buffer_mapped(bh) /* discard_buffer */
2730 && S_ISBLK(page->mapping->host->i_mode))
2731 {
2732 buffer_error();
2733 }
2734 }
2735 }
2737 /*
2738 * try_to_free_buffers() checks if all the buffers on this particular page
2739 * are unused, and releases them if so.
2741 * Exclusion against try_to_free_buffers may be obtained by either
2742 * locking the page or by holding its mapping's private_lock.
2744 * If the page is dirty but all the buffers are clean then we need to
2745 * be sure to mark the page clean as well. This is because the page
2746 * may be against a block device, and a later reattachment of buffers
2747 * to a dirty page will set *all* buffers dirty. Which would corrupt
2748 * filesystem data on the same device.
2750 * The same applies to regular filesystem pages: if all the buffers are
2751 * clean then we set the page clean and proceed. To do that, we require
2752 * total exclusion from __set_page_dirty_buffers(). That is obtained with
2753 * private_lock.
2755 * try_to_free_buffers() is non-blocking.
2756 */
2757 static inline int buffer_busy(struct buffer_head *bh)
2758 {
2759 return atomic_read(&bh->b_count) |
2760 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
2761 }
2763 static int
2764 drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
2765 {
2766 struct buffer_head *head = page_buffers(page);
2767 struct buffer_head *bh;
2768 int was_uptodate = 1;
2770 bh = head;
2771 do {
2772 check_ttfb_buffer(page, bh);
2773 if (buffer_busy(bh))
2774 goto failed;
2775 if (!buffer_uptodate(bh) && !buffer_req(bh))
2776 was_uptodate = 0;
2777 bh = bh->b_this_page;
2778 } while (bh != head);
2780 if (!was_uptodate && PageUptodate(page))
2781 buffer_error();
2783 do {
2784 struct buffer_head *next = bh->b_this_page;
2786 if (!list_empty(&bh->b_assoc_buffers))
2787 __remove_assoc_queue(bh);
2788 bh = next;
2789 } while (bh != head);
2790 *buffers_to_free = head;
2791 __clear_page_buffers(page);
2792 return 1;
2793 failed:
2794 return 0;
2795 }
2797 int try_to_free_buffers(struct page *page)
2798 {
2799 struct address_space * const mapping = page->mapping;
2800 struct buffer_head *buffers_to_free = NULL;
2801 int ret = 0;
2803 BUG_ON(!PageLocked(page));
2804 if (PageWriteback(page))
2805 return 0;
2807 if (mapping == NULL) { /* swapped-in anon page */
2808 ret = drop_buffers(page, &buffers_to_free);
2809 goto out;
2812 spin_lock(&mapping->private_lock);
2813 ret = drop_buffers(page, &buffers_to_free);
2814 if (ret && !PageSwapCache(page)) {
2815 /*
2816 * If the filesystem writes its buffers by hand (eg ext3)
2817 * then we can have clean buffers against a dirty page. We
2818 * clean the page here; otherwise later reattachment of buffers
2819 * could encounter a non-uptodate page, which is unresolvable.
2820 * This only applies in the rare case where try_to_free_buffers
2821 * succeeds but the page is not freed.
2822 */
2823 clear_page_dirty(page);
2824 }
2825 spin_unlock(&mapping->private_lock);
2826 out:
2827 if (buffers_to_free) {
2828 struct buffer_head *bh = buffers_to_free;
2830 do {
2831 struct buffer_head *next = bh->b_this_page;
2832 free_buffer_head(bh);
2833 bh = next;
2834 } while (bh != buffers_to_free);
2835 }
2836 return ret;
2837 }
2838 EXPORT_SYMBOL(try_to_free_buffers);
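/*
 * Editorial sketch (not part of the original source): for a filesystem with
 * no private journalling state, ->releasepage() is essentially a call to
 * try_to_free_buffers() (and that is also what try_to_release_page() falls
 * back to when no ->releasepage() is provided). The myfs_ name is
 * hypothetical.
 *
 *	static int myfs_releasepage(struct page *page, int gfp_mask)
 *	{
 *		return try_to_free_buffers(page);
 *	}
 */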
2840 int block_sync_page(struct page *page)
2842 blk_run_queues();
2843 return 0;
2844 }
2846 /*
2847 * There are no bdflush tunables left. But distributions are
2848 * still running obsolete flush daemons, so we terminate them here.
2849 *
2850 * Use of bdflush() is deprecated and will be removed in a future kernel.
2851 * The `pdflush' kernel threads fully replace bdflush daemons and this call.
2852 */
2853 asmlinkage long sys_bdflush(int func, long data)
2854 {
2855 static int msg_count;
2857 if (!capable(CAP_SYS_ADMIN))
2858 return -EPERM;
2860 if (msg_count < 5) {
2861 msg_count++;
2862 printk(KERN_INFO
2863 "warning: process `%s' used the obsolete bdflush"
2864 " system call\n", current->comm);
2865 printk(KERN_INFO "Fix your initscripts?\n");
2866 }
2868 if (func == 1)
2869 do_exit(0);
2870 return 0;
2871 }
2873 /*
2874 * Buffer-head allocation
2875 */
2876 static kmem_cache_t *bh_cachep;
2878 /*
2879 * Once the number of bh's in the machine exceeds this level, we start
2880 * stripping them in writeback.
2881 */
2882 static int max_buffer_heads;
2884 int buffer_heads_over_limit;
2886 struct bh_accounting {
2887 int nr; /* Number of live bh's */
2888 int ratelimit; /* Limit cacheline bouncing */
2889 };
2891 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
2893 static void recalc_bh_state(void)
2895 int i;
2896 int tot = 0;
2898 if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
2899 return;
2900 __get_cpu_var(bh_accounting).ratelimit = 0;
2901 for (i = 0; i < NR_CPUS; i++) {
2902 if (cpu_online(i))
2903 tot += per_cpu(bh_accounting, i).nr;
2904 }
2905 buffer_heads_over_limit = (tot > max_buffer_heads);
2906 }
2908 struct buffer_head *alloc_buffer_head(int gfp_flags)
2909 {
2910 struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
2911 if (ret) {
2912 preempt_disable();
2913 __get_cpu_var(bh_accounting).nr++;
2914 recalc_bh_state();
2915 preempt_enable();
2916 }
2917 return ret;
2918 }
2919 EXPORT_SYMBOL(alloc_buffer_head);
2921 void free_buffer_head(struct buffer_head *bh)
2922 {
2923 BUG_ON(!list_empty(&bh->b_assoc_buffers));
2924 kmem_cache_free(bh_cachep, bh);
2925 preempt_disable();
2926 __get_cpu_var(bh_accounting).nr--;
2927 recalc_bh_state();
2928 preempt_enable();
2929 }
2930 EXPORT_SYMBOL(free_buffer_head);
2932 static void
2933 init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long flags)
2935 if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
2936 SLAB_CTOR_CONSTRUCTOR) {
2937 struct buffer_head * bh = (struct buffer_head *)data;
2939 memset(bh, 0, sizeof(*bh));
2940 INIT_LIST_HEAD(&bh->b_assoc_buffers);
2941 }
2942 }
2944 static void buffer_init_cpu(int cpu)
2945 {
2946 struct bh_accounting *bha = &per_cpu(bh_accounting, cpu);
2947 struct bh_lru *bhl = &per_cpu(bh_lrus, cpu);
2949 bha->nr = 0;
2950 bha->ratelimit = 0;
2951 memset(bhl, 0, sizeof(*bhl));
2952 }
2954 static int __devinit buffer_cpu_notify(struct notifier_block *self,
2955 unsigned long action, void *hcpu)
2956 {
2957 long cpu = (long)hcpu;
2958 switch(action) {
2959 case CPU_UP_PREPARE:
2960 buffer_init_cpu(cpu);
2961 break;
2962 default:
2963 break;
2964 }
2965 return NOTIFY_OK;
2966 }
2968 static struct notifier_block __devinitdata buffer_nb = {
2969 .notifier_call = buffer_cpu_notify,
2970 };
2972 void __init buffer_init(void)
2973 {
2974 int i;
2975 int nrpages;
2977 bh_cachep = kmem_cache_create("buffer_head",
2978 sizeof(struct buffer_head), 0,
2979 0, init_buffer_head, NULL);
2980 for (i = 0; i < ARRAY_SIZE(bh_wait_queue_heads); i++)
2981 init_waitqueue_head(&bh_wait_queue_heads[i].wqh);
2983 /*
2984 * Limit the bh occupancy to 10% of ZONE_NORMAL
2985 */
2986 nrpages = (nr_free_buffer_pages() * 10) / 100;
2987 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
2988 buffer_cpu_notify(&buffer_nb, (unsigned long)CPU_UP_PREPARE,
2989 (void *)(long)smp_processor_id());
2990 register_cpu_notifier(&buffer_nb);
2991 }