/*
 *  linux/fs/buffer.c
 *
 *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
 */

/*
 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 *
 * Removed a lot of unnecessary code and simplified things now that
 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 *
 * Speed up hash, lru, and free list operations.  Use gfp() for allocating
 * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
 *
 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
 *
 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
 */

#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>

static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)

inline void
init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
{
	bh->b_end_io = handler;
	bh->b_private = private;
}

static int sync_buffer(void *word)
{
	struct block_device *bd;
	struct buffer_head *bh
		= container_of(word, struct buffer_head, b_state);

	smp_mb();
	bd = bh->b_bdev;
	if (bd)
		blk_run_address_space(bd->bd_inode->i_mapping);
	io_schedule();
	return 0;
}

void __lock_buffer(struct buffer_head *bh)
{
	wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
							TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);

void unlock_buffer(struct buffer_head *bh)
{
	smp_mb__before_clear_bit();
	clear_buffer_locked(bh);
	smp_mb__after_clear_bit();
	wake_up_bit(&bh->b_state, BH_Lock);
}

/*
 * Block until a buffer comes unlocked.  This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
 */
void __wait_on_buffer(struct buffer_head * bh)
{
	wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
}

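/*
 * Illustrative sketch, not part of this file: as the comment above says,
 * __wait_on_buffer() provides no exclusion - the buffer may be locked again
 * the instant it returns.  A caller that needs the buffer to stay in a known
 * state takes the lock itself.  "example_examine_stable" is hypothetical.
 */
static void example_examine_stable(struct buffer_head *bh)
{
	lock_buffer(bh);	/* sleep until we own BH_Lock */
	if (buffer_uptodate(bh)) {
		/* by convention, b_data is stable while we hold the lock */
	}
	unlock_buffer(bh);
	/* wait_on_buffer(bh) alone only waits out in-flight I/O */
}
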
static void
__clear_page_buffers(struct page *page)
{
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page_cache_release(page);
}

static void buffer_io_error(struct buffer_head *bh)
{
	char b[BDEVNAME_SIZE];

	printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
			bdevname(bh->b_bdev, b),
			(unsigned long long)bh->b_blocknr);
}

/*
 * End-of-IO handler helper function which does not touch the bh after
 * unlocking it.
 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
 * a race there is benign: unlock_buffer() only uses the bh's address for
 * hashing after unlocking the buffer, so it doesn't actually touch the bh
 * itself.
 */
static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		/* This happens, due to failed READA attempts. */
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
}

/*
 * Default synchronous end-of-IO handler..  Just mark it up-to-date and
 * unlock the buffer.  This is what ll_rw_block uses too.
 */
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
	__end_buffer_read_notouch(bh, uptodate);
	put_bh(bh);
}

void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
	char b[BDEVNAME_SIZE];

	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
			buffer_io_error(bh);
			printk(KERN_WARNING "lost page write due to "
					"I/O error on %s\n",
				       bdevname(bh->b_bdev, b));
		}
		set_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}

/*
 * Write out and wait upon all the dirty data associated with a block
 * device via its mapping.  Does not take the superblock lock.
 */
int sync_blockdev(struct block_device *bdev)
{
	int ret = 0;

	if (bdev)
		ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
	return ret;
}
EXPORT_SYMBOL(sync_blockdev);

/*
 * Write out and wait upon all dirty data associated with this
 * device.   Filesystem data as well as the underlying block
 * device.  Takes the superblock lock.
 */
int fsync_bdev(struct block_device *bdev)
{
	struct super_block *sb = get_super(bdev);
	if (sb) {
		int res = fsync_super(sb);
		drop_super(sb);
		return res;
	}
	return sync_blockdev(bdev);
}

/**
 * freeze_bdev  --  lock a filesystem and force it into a consistent state
 * @bdev:	blockdevice to lock
 *
 * This takes the block device bd_mount_sem to make sure no new mounts
 * happen on bdev until thaw_bdev() is called.
 * If a superblock is found on this device, we take the s_umount semaphore
 * on it to make sure nobody unmounts until the snapshot creation is done.
 */
struct super_block *freeze_bdev(struct block_device *bdev)
{
	struct super_block *sb;

	down(&bdev->bd_mount_sem);
	sb = get_super(bdev);
	if (sb && !(sb->s_flags & MS_RDONLY)) {
		sb->s_frozen = SB_FREEZE_WRITE;
		smp_wmb();

		__fsync_super(sb);

		sb->s_frozen = SB_FREEZE_TRANS;
		smp_wmb();

		sync_blockdev(sb->s_bdev);

		if (sb->s_op->write_super_lockfs)
			sb->s_op->write_super_lockfs(sb);
	}

	sync_blockdev(bdev);
	return sb;	/* thaw_bdev releases s->s_umount and bd_mount_sem */
}
EXPORT_SYMBOL(freeze_bdev);

/**
 * thaw_bdev  -- unlock filesystem
 * @bdev:	blockdevice to unlock
 * @sb:		associated superblock
 *
 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
 */
void thaw_bdev(struct block_device *bdev, struct super_block *sb)
{
	if (sb) {
		BUG_ON(sb->s_bdev != bdev);

		if (sb->s_op->unlockfs)
			sb->s_op->unlockfs(sb);
		sb->s_frozen = SB_UNFROZEN;
		smp_wmb();
		wake_up(&sb->s_wait_unfrozen);
		drop_super(sb);
	}

	up(&bdev->bd_mount_sem);
}
EXPORT_SYMBOL(thaw_bdev);

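/*
 * Illustrative sketch, not part of this file: the freeze/thaw pair is meant
 * for snapshot-style users (device-mapper, for example) that need a quiesced,
 * consistent on-disk image.  "example_snapshot" and the copy step are
 * hypothetical.
 */
static int example_snapshot(struct block_device *bdev)
{
	/* may return NULL if no superblock is mounted on bdev */
	struct super_block *sb = freeze_bdev(bdev);

	/* ...copy or COW the device here; writes are blocked... */

	thaw_bdev(bdev, sb);	/* a NULL sb only releases bd_mount_sem */
	return 0;
}
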
/*
 * Various filesystems appear to want __find_get_block to be non-blocking.
 * But it's the page lock which protects the buffers.  To get around this,
 * we get exclusion from try_to_free_buffers with the blockdev mapping's
 * private_lock.
 *
 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
 * may be quite high.  This code could TryLock the page, and if that
 * succeeds, there is no need to take private_lock. (But if
 * private_lock is contended then so is mapping->tree_lock).
 */
static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block)
{
	struct inode *bd_inode = bdev->bd_inode;
	struct address_space *bd_mapping = bd_inode->i_mapping;
	struct buffer_head *ret = NULL;
	pgoff_t index;
	struct buffer_head *bh;
	struct buffer_head *head;
	struct page *page;
	int all_mapped = 1;

	index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
	page = find_get_page(bd_mapping, index);
	if (!page)
		goto out;

	spin_lock(&bd_mapping->private_lock);
	if (!page_has_buffers(page))
		goto out_unlock;
	head = page_buffers(page);
	bh = head;
	do {
		if (bh->b_blocknr == block) {
			ret = bh;
			get_bh(bh);
			goto out_unlock;
		}
		if (!buffer_mapped(bh))
			all_mapped = 0;
		bh = bh->b_this_page;
	} while (bh != head);

	/* we might be here because some of the buffers on this page are
	 * not mapped.  This is due to various races between
	 * file io on the block device and getblk.  It gets dealt with
	 * elsewhere, don't buffer_error if we had some unmapped buffers
	 */
	if (all_mapped) {
		printk("__find_get_block_slow() failed. "
			"block=%llu, b_blocknr=%llu\n",
			(unsigned long long)block,
			(unsigned long long)bh->b_blocknr);
		printk("b_state=0x%08lx, b_size=%zu\n",
			bh->b_state, bh->b_size);
		printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
	}
out_unlock:
	spin_unlock(&bd_mapping->private_lock);
	page_cache_release(page);
out:
	return ret;
}

/* If invalidate_buffers() will trash dirty buffers, it means some kind
   of fs corruption is going on. Trashing dirty data always implies losing
   information that was supposed to be just stored on the physical layer
   by the user.

   Thus invalidate_buffers in general usage is not allowed to trash
   dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
   be preserved.  These buffers are simply skipped.

   We also skip buffers which are still in use.  For example this can
   happen if a userspace program is reading the block device.

   NOTE: In the case where the user removed a removable-media-disk even if
   there's still dirty data not synced on disk (due to a bug in the device
   driver or due to an error of the user), by not destroying the dirty
   buffers we could generate corruption also on the next media inserted.
   Thus a parameter is necessary to handle this case in the safest way
   possible (trying not to corrupt the newly inserted disk with data
   belonging to the old, now corrupted, disk). Also for the ramdisk the
   natural thing to do in order to release the ramdisk memory is to destroy
   dirty buffers.

   These are two special cases. Normal usage implies that the device driver
   issues a sync on the device (without waiting for I/O completion) and
   then an invalidate_buffers call that doesn't trash dirty buffers.

   For handling cache coherency with the blkdev pagecache the 'update' case
   has been introduced. It is needed to re-read from disk any pinned
   buffer. NOTE: re-reading from disk is destructive so we can do it only
   when we assume nobody is changing the buffercache under our I/O and when
   we think the disk contains more recent information than the buffercache.
   The update == 1 pass marks the buffers we need to update, the update == 2
   pass does the actual I/O. */
void invalidate_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;

	if (mapping->nrpages == 0)
		return;

	invalidate_bh_lrus();
	invalidate_mapping_pages(mapping, 0, -1);
}

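/*
 * Illustrative sketch, not part of this file: the "normal usage" described
 * above - push dirty state out first, then drop the stale clean pagecache.
 * invalidate_mapping_pages() skips dirty, locked and mapped pages, so
 * nothing unsynced is lost.  "example_media_revalidate" is hypothetical.
 */
static void example_media_revalidate(struct block_device *bdev)
{
	fsync_bdev(bdev);	/* write out and wait upon dirty data */
	invalidate_bdev(bdev);	/* then drop the clean cached pages */
}
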
/*
 * Kick pdflush then try to free up some ZONE_NORMAL memory.
 */
static void free_more_memory(void)
{
	struct zone **zones;
	pg_data_t *pgdat;

	wakeup_pdflush(1024);
	yield();

	for_each_online_pgdat(pgdat) {
		zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
		if (*zones)
			try_to_free_pages(zones, 0, GFP_NOFS);
	}
}

/*
 * I/O completion handler for block_read_full_page() - pages
 * which come unlocked at the end of I/O.
 */
static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct page *page;
	int page_uptodate = 1;

	BUG_ON(!buffer_async_read(bh));

	page = bh->b_page;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		clear_buffer_uptodate(bh);
		if (printk_ratelimit())
			buffer_io_error(bh);
		SetPageError(page);
	}

	/*
	 * Be _very_ careful from here on. Bad things can happen if
	 * two buffer heads end IO at almost the same time and both
	 * decide that the page is now completely done.
	 */
	first = page_buffers(page);
	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
	clear_buffer_async_read(bh);
	unlock_buffer(bh);
	tmp = bh;
	do {
		if (!buffer_uptodate(tmp))
			page_uptodate = 0;
		if (buffer_async_read(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);

	/*
	 * If none of the buffers had errors and they are all
	 * uptodate then we can set the page uptodate.
	 */
	if (page_uptodate && !PageError(page))
		SetPageUptodate(page);
	unlock_page(page);
	return;

still_busy:
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	return;
}

/*
 * Completion handler for block_write_full_page() - pages which are unlocked
 * during I/O, and which have PageWriteback cleared upon I/O completion.
 */
static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
	char b[BDEVNAME_SIZE];
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct page *page;

	BUG_ON(!buffer_async_write(bh));

	page = bh->b_page;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		if (printk_ratelimit()) {
			buffer_io_error(bh);
			printk(KERN_WARNING "lost page write due to "
					"I/O error on %s\n",
			       bdevname(bh->b_bdev, b));
		}
		set_bit(AS_EIO, &page->mapping->flags);
		set_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
		SetPageError(page);
	}

	first = page_buffers(page);
	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);

	clear_buffer_async_write(bh);
	unlock_buffer(bh);
	tmp = bh->b_this_page;
	while (tmp != bh) {
		if (buffer_async_write(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	}
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	end_page_writeback(page);
	return;

still_busy:
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	return;
}

/*
 * If a page's buffers are under async read-in (end_buffer_async_read
 * completion) then there is a possibility that another thread of
 * control could lock one of the buffers after it has completed
 * but while some of the other buffers have not completed.  This
 * locked buffer would confuse end_buffer_async_read() into not unlocking
 * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
 * that this buffer is not under async I/O.
 *
 * The page comes unlocked when it has no locked buffer_async buffers
 * left.
 *
 * PageLocked prevents anyone starting new async I/O reads against any of
 * the buffers.
 *
 * PageWriteback is used to prevent simultaneous writeout of the same
 * page.
 *
 * PageLocked prevents anyone from starting writeback of a page which is
 * under read I/O (PageWriteback is only ever set against a locked page).
 */
static void mark_buffer_async_read(struct buffer_head *bh)
{
	bh->b_end_io = end_buffer_async_read;
	set_buffer_async_read(bh);
}

void mark_buffer_async_write(struct buffer_head *bh)
{
	bh->b_end_io = end_buffer_async_write;
	set_buffer_async_write(bh);
}
EXPORT_SYMBOL(mark_buffer_async_write);

/*
 * fs/buffer.c contains helper functions for buffer-backed address space's
 * fsync functions.  A common requirement for buffer-based filesystems is
 * that certain data from the backing blockdev needs to be written out for
 * a successful fsync().  For example, ext2 indirect blocks need to be
 * written back and waited upon before fsync() returns.
 *
 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
 * management of a list of dependent buffers at ->i_mapping->private_list.
 *
 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 * from their controlling inode's queue when they are being freed.  But
 * try_to_free_buffers() will be operating against the *blockdev* mapping
 * at the time, not against the S_ISREG file which depends on those buffers.
 * So the locking for private_list is via the private_lock in the address_space
 * which backs the buffers.  Which is different from the address_space
 * against which the buffers are listed.  So for a particular address_space,
 * mapping->private_lock does *not* protect mapping->private_list!  In fact,
 * mapping->private_list will always be protected by the backing blockdev's
 * ->private_lock.
 *
 * Which introduces a requirement: all buffers on an address_space's
 * ->private_list must be from the same address_space: the blockdev's.
 *
 * address_spaces which do not place buffers at ->private_list via these
 * utility functions are free to use private_lock and private_list for
 * whatever they want.  The only requirement is that list_empty(private_list)
 * be true at clear_inode() time.
 *
 * FIXME: clear_inode should not call invalidate_inode_buffers().  The
 * filesystems should do that.  invalidate_inode_buffers() should just go
 * BUG_ON(!list_empty).
 *
 * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
 * take an address_space, not an inode.  And it should be called
 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
 * queued up.
 *
 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
 * list if it is already on a list.  Because if the buffer is on a list,
 * it *must* already be on the right one.  If not, the filesystem is being
 * silly.  This will save a ton of locking.  But first we have to ensure
 * that buffers are taken *off* the old inode's list when they are freed
 * (presumably in truncate).  That requires careful auditing of all
 * filesystems (do it inside bforget()).  It could also be done by bringing
 * b_inode back.
 */

/*
 * The buffer's backing address_space's private_lock must be held
 */
static inline void __remove_assoc_queue(struct buffer_head *bh)
{
	list_del_init(&bh->b_assoc_buffers);
	WARN_ON(!bh->b_assoc_map);
	if (buffer_write_io_error(bh))
		set_bit(AS_EIO, &bh->b_assoc_map->flags);
	bh->b_assoc_map = NULL;
}

int inode_has_buffers(struct inode *inode)
{
	return !list_empty(&inode->i_data.private_list);
}

/*
 * osync is designed to support O_SYNC io.  It waits synchronously for
 * all already-submitted IO to complete, but does not queue any new
 * writes to the disk.
 *
 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
 * you dirty the buffers, and then use osync_inode_buffers to wait for
 * completion.  Any other dirty buffers which are not yet queued for
 * write will not be flushed to disk by the osync.
 */
static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head *p;
	int err = 0;

	spin_lock(lock);
repeat:
	list_for_each_prev(p, list) {
		bh = BH_ENTRY(p);
		if (buffer_locked(bh)) {
			get_bh(bh);
			spin_unlock(lock);
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				err = -EIO;
			brelse(bh);
			spin_lock(lock);
			goto repeat;
		}
	}
	spin_unlock(lock);
	return err;
}

/**
 * sync_mapping_buffers - write out and wait upon a mapping's "associated"
 *                        buffers
 * @mapping: the mapping which wants those buffers written
 *
 * Starts I/O against the buffers at mapping->private_list, and waits upon
 * that I/O.
 *
 * Basically, this is a convenience function for fsync().
 * @mapping is a file or directory which needs those buffers to be written for
 * a successful fsync().
 */
int sync_mapping_buffers(struct address_space *mapping)
{
	struct address_space *buffer_mapping = mapping->assoc_mapping;

	if (buffer_mapping == NULL || list_empty(&mapping->private_list))
		return 0;

	return fsync_buffers_list(&buffer_mapping->private_lock,
					&mapping->private_list);
}
EXPORT_SYMBOL(sync_mapping_buffers);

/*
 * Called when we've recently written block `bblock', and it is known that
 * `bblock' was for a buffer_boundary() buffer.  This means that the block at
 * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
 * dirty, schedule it for IO.  So that indirects merge nicely with their data.
 */
void write_boundary_block(struct block_device *bdev,
			sector_t bblock, unsigned blocksize)
{
	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
	if (bh) {
		if (buffer_dirty(bh))
			ll_rw_block(WRITE, 1, &bh);
		put_bh(bh);
	}
}

void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct address_space *buffer_mapping = bh->b_page->mapping;

	mark_buffer_dirty(bh);
	if (!mapping->assoc_mapping) {
		mapping->assoc_mapping = buffer_mapping;
	} else {
		BUG_ON(mapping->assoc_mapping != buffer_mapping);
	}
	if (list_empty(&bh->b_assoc_buffers)) {
		spin_lock(&buffer_mapping->private_lock);
		list_move_tail(&bh->b_assoc_buffers,
				&mapping->private_list);
		bh->b_assoc_map = mapping;
		spin_unlock(&buffer_mapping->private_lock);
	}
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);

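/*
 * Illustrative sketch, not part of this file: how a simple block-based
 * filesystem uses the two helpers above, per the big comment earlier.
 * Dirty metadata (say, an indirect block) is queued against the regular
 * file's mapping, and ->fsync() then only has to flush that private_list.
 * The "example_*" names and the ->fsync() wiring are hypothetical.
 */
static void example_dirty_indirect(struct inode *inode,
				   struct buffer_head *ind_bh)
{
	/* dirties ind_bh and queues it on inode->i_mapping->private_list */
	mark_buffer_dirty_inode(ind_bh, inode);
}

static int example_fsync(struct file *file, struct dentry *dentry,
			 int datasync)
{
	struct inode *inode = dentry->d_inode;

	/* writes out and waits upon the associated buffers queued above */
	return sync_mapping_buffers(inode->i_mapping);
}
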
/*
 * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
 * dirty.
 *
 * If warn is true, then emit a warning if the page is not uptodate and has
 * not been truncated.
 */
static int __set_page_dirty(struct page *page,
		struct address_space *mapping, int warn)
{
	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	if (TestSetPageDirty(page))
		return 0;

	write_lock_irq(&mapping->tree_lock);
	if (page->mapping) {	/* Race with truncate? */
		WARN_ON_ONCE(warn && !PageUptodate(page));

		if (mapping_cap_account_dirty(mapping)) {
			__inc_zone_page_state(page, NR_FILE_DIRTY);
			__inc_bdi_stat(mapping->backing_dev_info,
					BDI_RECLAIMABLE);
			task_io_account_write(PAGE_CACHE_SIZE);
		}
		radix_tree_tag_set(&mapping->page_tree,
				page_index(page), PAGECACHE_TAG_DIRTY);
	}
	write_unlock_irq(&mapping->tree_lock);
	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	return 1;
}

/*
 * Add a page to the dirty page list.
 *
 * It is a sad fact of life that this function is called from several places
 * deeply under spinlocking.  It may not sleep.
 *
 * If the page has buffers, the uptodate buffers are set dirty, to preserve
 * dirty-state coherency between the page and the buffers.  If the page does
 * not have buffers then when they are later attached they will all be set
 * dirty.
 *
 * The buffers are dirtied before the page is dirtied.  There's a small race
 * window in which a writepage caller may see the page cleanness but not the
 * buffer dirtiness.  That's fine.  If this code were to set the page dirty
 * before the buffers, a concurrent writepage caller could clear the page dirty
 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
 * page on the dirty page list.
 *
 * We use private_lock to lock against try_to_free_buffers while using the
 * page's buffer list.  Also use this to protect against clean buffers being
 * added to the page after it was set dirty.
 *
 * FIXME: may need to call ->reservepage here as well.  That's rather up to the
 * address_space though.
 */
int __set_page_dirty_buffers(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	spin_lock(&mapping->private_lock);
	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		do {
			set_buffer_dirty(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	spin_unlock(&mapping->private_lock);

	return __set_page_dirty(page, mapping, 1);
}
EXPORT_SYMBOL(__set_page_dirty_buffers);

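/*
 * Illustrative sketch, not part of this file: a buffer-backed filesystem
 * typically wires __set_page_dirty_buffers() directly into its
 * address_space_operations, so dirtying the page dirties the buffers as
 * described above.  The surrounding (elided) methods are hypothetical.
 */
static const struct address_space_operations example_dirty_aops = {
	.set_page_dirty	= __set_page_dirty_buffers,
	/* ...the filesystem's readpage, writepage, etc... */
};
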
/*
 * Write out and wait upon a list of buffers.
 *
 * We have conflicting pressures: we want to make sure that all
 * initially dirty buffers get waited on, but that any subsequently
 * dirtied buffers don't.  After all, we don't want fsync to last
 * forever if somebody is actively writing to the file.
 *
 * Do this in two main stages: first we copy dirty buffers to a
 * temporary inode list, queueing the writes as we go.  Then we clean
 * up, waiting for those writes to complete.
 *
 * During this second stage, any subsequent updates to the file may end
 * up refiling the buffer on the original inode's dirty list again, so
 * there is a chance we will end up with a buffer queued for write but
 * not yet completed on that list.  So, as a final cleanup we go through
 * the osync code to catch these locked, dirty buffers without requeuing
 * any newly dirty buffers for write.
 */
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head tmp;
	int err = 0, err2;

	INIT_LIST_HEAD(&tmp);

	spin_lock(lock);
	while (!list_empty(list)) {
		bh = BH_ENTRY(list->next);
		__remove_assoc_queue(bh);
		if (buffer_dirty(bh) || buffer_locked(bh)) {
			list_add(&bh->b_assoc_buffers, &tmp);
			if (buffer_dirty(bh)) {
				get_bh(bh);
				spin_unlock(lock);
				/*
				 * Ensure any pending I/O completes so that
				 * ll_rw_block() actually writes the current
				 * contents - it is a noop if I/O is still in
				 * flight on potentially older contents.
				 */
				ll_rw_block(SWRITE, 1, &bh);
				brelse(bh);
				spin_lock(lock);
			}
		}
	}

	while (!list_empty(&tmp)) {
		bh = BH_ENTRY(tmp.prev);
		list_del_init(&bh->b_assoc_buffers);
		get_bh(bh);
		spin_unlock(lock);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			err = -EIO;
		brelse(bh);
		spin_lock(lock);
	}

	spin_unlock(lock);
	err2 = osync_buffers_list(lock, list);
	if (err)
		return err;
	else
		return err2;
}

/*
 * Invalidate any and all dirty buffers on a given inode.  We are
 * probably unmounting the fs, but that doesn't mean we have already
 * done a sync().  Just drop the buffers from the inode list.
 *
 * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
 * assumes that all the buffers are against the blockdev.  Not true
 * for reiserfs.
 */
void invalidate_inode_buffers(struct inode *inode)
{
	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->assoc_mapping;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list))
			__remove_assoc_queue(BH_ENTRY(list->next));
		spin_unlock(&buffer_mapping->private_lock);
	}
}

/*
 * Remove any clean buffers from the inode's buffer list.  This is called
 * when we're trying to free the inode itself.  Those buffers can pin it.
 *
 * Returns true if all buffers were removed.
 */
int remove_inode_buffers(struct inode *inode)
{
	int ret = 1;

	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->assoc_mapping;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list)) {
			struct buffer_head *bh = BH_ENTRY(list->next);
			if (buffer_dirty(bh)) {
				ret = 0;
				break;
			}
			__remove_assoc_queue(bh);
		}
		spin_unlock(&buffer_mapping->private_lock);
	}
	return ret;
}

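/*
 * Illustrative sketch, not part of this file: per the FIXME above, a
 * filesystem that queues buffers with mark_buffer_dirty_inode() should
 * leave ->private_list empty by clear_inode() time; dropping the entries
 * in its own inode-teardown path looks like this.  "example_delete_inode"
 * is hypothetical.
 */
static void example_delete_inode(struct inode *inode)
{
	/* forget dependent buffers first - they would pin the inode */
	invalidate_inode_buffers(inode);
	clear_inode(inode);
}
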
/*
 * Create the appropriate buffers when given a page for data area and
 * the size of each buffer..  Use the bh->b_this_page linked list to
 * follow the buffers created.  Return NULL if unable to create more
 * buffers.
 *
 * The retry flag is used to differentiate async IO (paging, swapping)
 * which may not fail from ordinary buffer allocations.
 */
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
		int retry)
{
	struct buffer_head *bh, *head;
	long offset;

try_again:
	head = NULL;
	offset = PAGE_SIZE;
	while ((offset -= size) >= 0) {
		bh = alloc_buffer_head(GFP_NOFS);
		if (!bh)
			goto no_grow;

		bh->b_bdev = NULL;
		bh->b_this_page = head;
		bh->b_blocknr = -1;
		head = bh;

		bh->b_state = 0;
		atomic_set(&bh->b_count, 0);
		bh->b_private = NULL;
		bh->b_size = size;

		/* Link the buffer to its page */
		set_bh_page(bh, page, offset);

		init_buffer(bh, NULL, NULL);
	}
	return head;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
	if (head) {
		do {
			bh = head;
			head = head->b_this_page;
			free_buffer_head(bh);
		} while (head);
	}

	/*
	 * Return failure for non-async IO requests.  Async IO requests
	 * are not allowed to fail, so we have to wait until buffer heads
	 * become available.  But we don't want tasks sleeping with
	 * partially complete buffers, so all were released above.
	 */
	if (!retry)
		return NULL;

	/* We're _really_ low on memory. Now we just
	 * wait for old buffer heads to become free due to
	 * finishing IO.  Since this is an async request and
	 * the reserve list is empty, we're sure there are
	 * async buffer heads in use.
	 */
	free_more_memory();
	goto try_again;
}
EXPORT_SYMBOL_GPL(alloc_page_buffers);

static inline void
link_dev_buffers(struct page *page, struct buffer_head *head)
{
	struct buffer_head *bh, *tail;

	bh = head;
	do {
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;
	attach_page_buffers(page, head);
}

/*
 * Initialise the state of a blockdev page's buffers.
 */
static void
init_page_buffers(struct page *page, struct block_device *bdev,
			sector_t block, int size)
{
	struct buffer_head *head = page_buffers(page);
	struct buffer_head *bh = head;
	int uptodate = PageUptodate(page);

	do {
		if (!buffer_mapped(bh)) {
			init_buffer(bh, NULL, NULL);
			bh->b_bdev = bdev;
			bh->b_blocknr = block;
			if (uptodate)
				set_buffer_uptodate(bh);
			set_buffer_mapped(bh);
		}
		block++;
		bh = bh->b_this_page;
	} while (bh != head);
}

/*
 * Create the page-cache page that contains the requested block.
 *
 * This is used purely for blockdev mappings.
 */
static struct page *
grow_dev_page(struct block_device *bdev, sector_t block,
		pgoff_t index, int size)
{
	struct inode *inode = bdev->bd_inode;
	struct page *page;
	struct buffer_head *bh;

	page = find_or_create_page(inode->i_mapping, index,
		(mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
	if (!page)
		return NULL;

	BUG_ON(!PageLocked(page));

	if (page_has_buffers(page)) {
		bh = page_buffers(page);
		if (bh->b_size == size) {
			init_page_buffers(page, bdev, block, size);
			return page;
		}
		if (!try_to_free_buffers(page))
			goto failed;
	}

	/*
	 * Allocate some buffers for this page
	 */
	bh = alloc_page_buffers(page, size, 0);
	if (!bh)
		goto failed;

	/*
	 * Link the page to the buffers and initialise them.  Take the
	 * lock to be atomic wrt __find_get_block(), which does not
	 * run under the page lock.
	 */
	spin_lock(&inode->i_mapping->private_lock);
	link_dev_buffers(page, bh);
	init_page_buffers(page, bdev, block, size);
	spin_unlock(&inode->i_mapping->private_lock);
	return page;

failed:
	BUG();
	unlock_page(page);
	page_cache_release(page);
	return NULL;
}

/*
 * Create buffers for the specified block device block's page.  If
 * that page was dirty, the buffers are set dirty also.
 */
static int
grow_buffers(struct block_device *bdev, sector_t block, int size)
{
	struct page *page;
	pgoff_t index;
	int sizebits;

	sizebits = -1;
	do {
		sizebits++;
	} while ((size << sizebits) < PAGE_SIZE);

	index = block >> sizebits;

	/*
	 * Check for a block which wants to lie outside our maximum possible
	 * pagecache index.  (this comparison is done using sector_t types).
	 */
	if (unlikely(index != block >> sizebits)) {
		char b[BDEVNAME_SIZE];

		printk(KERN_ERR "%s: requested out-of-range block %llu for "
			"device %s\n",
			__FUNCTION__, (unsigned long long)block,
			bdevname(bdev, b));
		return -EIO;
	}
	block = index << sizebits;
	/* Create a page with the proper size buffers.. */
	page = grow_dev_page(bdev, block, index, size);
	if (!page)
		return 0;
	unlock_page(page);
	page_cache_release(page);
	return 1;
}

static struct buffer_head *
__getblk_slow(struct block_device *bdev, sector_t block, int size)
{
	/* Size must be multiple of hard sectorsize */
	if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
			(size < 512 || size > PAGE_SIZE))) {
		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
					size);
		printk(KERN_ERR "hardsect size: %d\n",
					bdev_hardsect_size(bdev));

		dump_stack();
		return NULL;
	}

	for (;;) {
		struct buffer_head * bh;
		int ret;

		bh = __find_get_block(bdev, block, size);
		if (bh)
			return bh;

		ret = grow_buffers(bdev, block, size);
		if (ret < 0)
			return NULL;
		if (ret == 0)
			free_more_memory();
	}
}

/*
 * The relationship between dirty buffers and dirty pages:
 *
 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
 * the page is tagged dirty in its radix tree.
 *
 * At all times, the dirtiness of the buffers represents the dirtiness of
 * subsections of the page.  If the page has buffers, the page dirty bit is
 * merely a hint about the true dirty state.
 *
 * When a page is set dirty in its entirety, all its buffers are marked dirty
 * (if the page has buffers).
 *
 * When a buffer is marked dirty, its page is dirtied, but the page's other
 * buffers are not.
 *
 * Also.  When blockdev buffers are explicitly read with bread(), they
 * individually become uptodate.  But their backing page remains not
 * uptodate - even if all of its buffers are uptodate.  A subsequent
 * block_read_full_page() against that page will discover all the uptodate
 * buffers, will set the page uptodate and will perform no I/O.
 */

/**
 * mark_buffer_dirty - mark a buffer_head as needing writeout
 * @bh: the buffer_head to mark dirty
 *
 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
 * backing page dirty, then tag the page as dirty in its address_space's radix
 * tree and then attach the address_space's inode to its superblock's dirty
 * inode list.
 *
 * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
 * mapping->tree_lock and the global inode_lock.
 */
void mark_buffer_dirty(struct buffer_head *bh)
{
	WARN_ON_ONCE(!buffer_uptodate(bh));
	if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
		__set_page_dirty(bh->b_page, page_mapping(bh->b_page), 0);
}

/*
 * Decrement a buffer_head's reference count.  If all buffers against a page
 * have zero reference count, are clean and unlocked, and if the page is clean
 * and unlocked then try_to_free_buffers() may strip the buffers from the page
 * in preparation for freeing it (sometimes, rarely, buffers are removed from
 * a page but it ends up not being freed, and buffers may later be reattached).
 */
void __brelse(struct buffer_head * buf)
{
	if (atomic_read(&buf->b_count)) {
		put_bh(buf);
		return;
	}
	printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n");
	WARN_ON(1);
}

/*
 * bforget() is like brelse(), except it discards any
 * potentially dirty data.
 */
void __bforget(struct buffer_head *bh)
{
	clear_buffer_dirty(bh);
	if (!list_empty(&bh->b_assoc_buffers)) {
		struct address_space *buffer_mapping = bh->b_page->mapping;

		spin_lock(&buffer_mapping->private_lock);
		list_del_init(&bh->b_assoc_buffers);
		bh->b_assoc_map = NULL;
		spin_unlock(&buffer_mapping->private_lock);
	}
	__brelse(bh);
}

static struct buffer_head *__bread_slow(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return bh;
	} else {
		get_bh(bh);
		bh->b_end_io = end_buffer_read_sync;
		submit_bh(READ, bh);
		wait_on_buffer(bh);
		if (buffer_uptodate(bh))
			return bh;
	}
	brelse(bh);
	return NULL;
}

/*
 * Per-cpu buffer LRU implementation.  To reduce the cost of __find_get_block().
 * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
 * refcount elevated by one when they're in an LRU.  A buffer can only appear
 * once in a particular CPU's LRU.  A single buffer can be present in multiple
 * CPU's LRUs at the same time.
 *
 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
 * sb_find_get_block().
 *
 * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
 * a local interrupt disable for that.
 */

#define BH_LRU_SIZE	8

struct bh_lru {
	struct buffer_head *bhs[BH_LRU_SIZE];
};

static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};

#ifdef CONFIG_SMP
#define bh_lru_lock()	local_irq_disable()
#define bh_lru_unlock()	local_irq_enable()
#else
#define bh_lru_lock()	preempt_disable()
#define bh_lru_unlock()	preempt_enable()
#endif

static inline void check_irqs_on(void)
{
#ifdef irqs_disabled
	BUG_ON(irqs_disabled());
#endif
}

/*
 * The LRU management algorithm is dopey-but-simple.  Sorry.
 */
static void bh_lru_install(struct buffer_head *bh)
{
	struct buffer_head *evictee = NULL;
	struct bh_lru *lru;

	check_irqs_on();
	bh_lru_lock();
	lru = &__get_cpu_var(bh_lrus);
	if (lru->bhs[0] != bh) {
		struct buffer_head *bhs[BH_LRU_SIZE];
		int in;
		int out = 0;

		get_bh(bh);
		bhs[out++] = bh;
		for (in = 0; in < BH_LRU_SIZE; in++) {
			struct buffer_head *bh2 = lru->bhs[in];

			if (bh2 == bh) {
				__brelse(bh2);
			} else {
				if (out >= BH_LRU_SIZE) {
					BUG_ON(evictee != NULL);
					evictee = bh2;
				} else {
					bhs[out++] = bh2;
				}
			}
		}
		while (out < BH_LRU_SIZE)
			bhs[out++] = NULL;
		memcpy(lru->bhs, bhs, sizeof(bhs));
	}
	bh_lru_unlock();

	if (evictee)
		__brelse(evictee);
}

/*
 * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
 */
static struct buffer_head *
lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *ret = NULL;
	struct bh_lru *lru;
	unsigned int i;

	check_irqs_on();
	bh_lru_lock();
	lru = &__get_cpu_var(bh_lrus);
	for (i = 0; i < BH_LRU_SIZE; i++) {
		struct buffer_head *bh = lru->bhs[i];

		if (bh && bh->b_bdev == bdev &&
				bh->b_blocknr == block && bh->b_size == size) {
			if (i) {
				while (i) {
					lru->bhs[i] = lru->bhs[i - 1];
					i--;
				}
				lru->bhs[0] = bh;
			}
			get_bh(bh);
			ret = bh;
			break;
		}
	}
	bh_lru_unlock();
	return ret;
}

/*
 * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
 * it in the LRU and mark it as accessed.  If it is not present then return
 * NULL
 */
struct buffer_head *
__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);

	if (bh == NULL) {
		bh = __find_get_block_slow(bdev, block);
		if (bh)
			bh_lru_install(bh);
	}
	if (bh)
		touch_buffer(bh);
	return bh;
}
EXPORT_SYMBOL(__find_get_block);

/*
 * __getblk will locate (and, if necessary, create) the buffer_head
 * which corresponds to the passed block_device, block and size. The
 * returned buffer has its reference count incremented.
 *
 * __getblk() cannot fail - it just keeps trying.  If you pass it an
 * illegal block number, __getblk() will happily return a buffer_head
 * which represents the non-existent block.  Very weird.
 *
 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
 * attempt is failing.  FIXME, perhaps?
 */
struct buffer_head *
__getblk(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = __find_get_block(bdev, block, size);

	might_sleep();
	if (bh == NULL)
		bh = __getblk_slow(bdev, block, size);
	return bh;
}
EXPORT_SYMBOL(__getblk);

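/*
 * Illustrative sketch, not part of this file: since __getblk() returns a
 * buffer without reading it, it is the natural call when the block is about
 * to be completely overwritten - e.g. initialising a brand-new metadata
 * block via the sb_getblk() wrapper.  "example_new_block" and the zero-fill
 * policy are hypothetical.
 */
static struct buffer_head *example_new_block(struct super_block *sb,
					     sector_t block)
{
	struct buffer_head *bh = sb_getblk(sb, block);	/* cannot fail */

	lock_buffer(bh);
	memset(bh->b_data, 0, sb->s_blocksize);
	set_buffer_uptodate(bh);	/* contents are now defined */
	unlock_buffer(bh);
	mark_buffer_dirty(bh);		/* schedule it for writeback */
	return bh;			/* caller must brelse() it */
}
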
/*
 * Do async read-ahead on a buffer..
 */
void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = __getblk(bdev, block, size);
	if (likely(bh)) {
		ll_rw_block(READA, 1, &bh);
		brelse(bh);
	}
}
EXPORT_SYMBOL(__breadahead);

/**
 *  __bread() - reads a specified block and returns the bh
 *  @bdev: the block_device to read from
 *  @block: number of block
 *  @size: size (in bytes) to read
 *
 *  Reads a specified block, and returns buffer head that contains it.
 *  It returns NULL if the block was unreadable.
 */
struct buffer_head *
__bread(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = __getblk(bdev, block, size);

	if (likely(bh) && !buffer_uptodate(bh))
		bh = __bread_slow(bh);
	return bh;
}
EXPORT_SYMBOL(__bread);

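/*
 * Illustrative sketch, not part of this file: the usual read-modify-write
 * cycle on a metadata block, plus a readahead hint.  sb_bread() and
 * sb_breadahead() wrap __bread()/__breadahead() with the superblock's block
 * size.  The "example_*" naming and the choice to write synchronously are
 * hypothetical.
 */
static int example_update_block(struct super_block *sb, sector_t block)
{
	struct buffer_head *bh;
	int err;

	sb_breadahead(sb, block + 1);	/* hint: we may want the next block */

	bh = sb_bread(sb, block);	/* returns an uptodate bh, or... */
	if (!bh)
		return -EIO;		/* ...NULL if the block was unreadable */

	lock_buffer(bh);
	/* ...modify bh->b_data here... */
	unlock_buffer(bh);

	mark_buffer_dirty(bh);		/* lazy writeback would also do... */
	err = sync_dirty_buffer(bh);	/* ...but write it out synchronously */
	brelse(bh);
	return err;
}
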
/*
 * invalidate_bh_lrus() is called rarely - but not only at unmount.
 * This doesn't race because it runs in each cpu either in irq
 * or with preempt disabled.
 */
static void invalidate_bh_lru(void *arg)
{
	struct bh_lru *b = &get_cpu_var(bh_lrus);
	int i;

	for (i = 0; i < BH_LRU_SIZE; i++) {
		brelse(b->bhs[i]);
		b->bhs[i] = NULL;
	}
	put_cpu_var(bh_lrus);
}

void invalidate_bh_lrus(void)
{
	on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
}
EXPORT_SYMBOL_GPL(invalidate_bh_lrus);

void set_bh_page(struct buffer_head *bh,
		struct page *page, unsigned long offset)
{
	bh->b_page = page;
	BUG_ON(offset >= PAGE_SIZE);
	if (PageHighMem(page))
		/*
		 * This catches illegal uses and preserves the offset:
		 */
		bh->b_data = (char *)(0 + offset);
	else
		bh->b_data = page_address(page) + offset;
}
EXPORT_SYMBOL(set_bh_page);

/*
 * Called when truncating a buffer on a page completely.
 */
static void discard_buffer(struct buffer_head * bh)
{
	lock_buffer(bh);
	clear_buffer_dirty(bh);
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
	unlock_buffer(bh);
}

/**
 * block_invalidatepage - invalidate part or all of a buffer-backed page
 *
 * @page: the page which is affected
 * @offset: the index of the truncation point
 *
 * block_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * block_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point.  Because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void block_invalidatepage(struct page *page, unsigned long offset)
{
	struct buffer_head *head, *bh, *next;
	unsigned int curr_off = 0;

	BUG_ON(!PageLocked(page));
	if (!page_has_buffers(page))
		goto out;

	head = page_buffers(page);
	bh = head;
	do {
		unsigned int next_off = curr_off + bh->b_size;
		next = bh->b_this_page;

		/*
		 * is this block fully invalidated?
		 */
		if (offset <= curr_off)
			discard_buffer(bh);
		curr_off = next_off;
		bh = next;
	} while (bh != head);

	/*
	 * We release buffers only if the entire page is being invalidated.
	 * The get_block cached value has been unconditionally invalidated,
	 * so real IO is not possible anymore.
	 */
	if (offset == 0)
		try_to_release_page(page, 0);
out:
	return;
}
EXPORT_SYMBOL(block_invalidatepage);

/*
 * We attach and possibly dirty the buffers atomically wrt
 * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
 * is already excluded via the page lock.
 */
void create_empty_buffers(struct page *page,
			unsigned long blocksize, unsigned long b_state)
{
	struct buffer_head *bh, *head, *tail;

	head = alloc_page_buffers(page, blocksize, 1);
	bh = head;
	do {
		bh->b_state |= b_state;
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;

	spin_lock(&page->mapping->private_lock);
	if (PageUptodate(page) || PageDirty(page)) {
		bh = head;
		do {
			if (PageDirty(page))
				set_buffer_dirty(bh);
			if (PageUptodate(page))
				set_buffer_uptodate(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	attach_page_buffers(page, head);
	spin_unlock(&page->mapping->private_lock);
}
EXPORT_SYMBOL(create_empty_buffers);

/*
 * We are taking a block for data and we don't want any output from any
 * buffer-cache aliases starting from return from that function and
 * until the moment when something will explicitly mark the buffer
 * dirty (hopefully that will not happen until we will free that block ;-)
 * We don't even need to mark it not-uptodate - nobody can expect
 * anything from a newly allocated buffer anyway. We used to use
 * unmap_buffer() for such invalidation, but that was wrong. We definitely
 * don't want to mark the alias unmapped, for example - it would confuse
 * anyone who might pick it with bread() afterwards...
 *
 * Also..  Note that bforget() doesn't lock the buffer.  So there can
 * be writeout I/O going on against recently-freed buffers.  We don't
 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
 * only if we really need to.  That happens here.
 */
void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
{
	struct buffer_head *old_bh;

	might_sleep();

	old_bh = __find_get_block_slow(bdev, block);
	if (old_bh) {
		clear_buffer_dirty(old_bh);
		wait_on_buffer(old_bh);
		clear_buffer_req(old_bh);
		__brelse(old_bh);
	}
}
EXPORT_SYMBOL(unmap_underlying_metadata);

/*
 * NOTE! All mapped/uptodate combinations are valid:
 *
 *	Mapped	Uptodate	Meaning
 *
 *	No	No		"unknown" - must do get_block()
 *	No	Yes		"hole" - zero-filled
 *	Yes	No		"allocated" - allocated on disk, not read in
 *	Yes	Yes		"valid" - allocated and up-to-date in memory.
 *
 * "Dirty" is valid only with the last case (mapped+uptodate).
 */

/*
 * While block_write_full_page is writing back the dirty buffers under
 * the page lock, whoever dirtied the buffers may decide to clean them
 * again at any time.  We handle that by only looking at the buffer
 * state inside lock_buffer().
 *
 * If block_write_full_page() is called for regular writeback
 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
 * locked buffer.   This only can happen if someone has written the buffer
 * directly, with submit_bh().  At the address_space level PageWriteback
 * prevents this contention from occurring.
 */
static int __block_write_full_page(struct inode *inode, struct page *page,
			get_block_t *get_block, struct writeback_control *wbc)
{
	int err;
	sector_t block;
	sector_t last_block;
	struct buffer_head *bh, *head;
	const unsigned blocksize = 1 << inode->i_blkbits;
	int nr_underway = 0;

	BUG_ON(!PageLocked(page));

	last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;

	if (!page_has_buffers(page)) {
		create_empty_buffers(page, blocksize,
					(1 << BH_Dirty)|(1 << BH_Uptodate));
	}

	/*
	 * Be very careful.  We have no exclusion from __set_page_dirty_buffers
	 * here, and the (potentially unmapped) buffers may become dirty at
	 * any time.  If a buffer becomes dirty here after we've inspected it
	 * then we just miss that fact, and the page stays dirty.
	 *
	 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
	 * handle that here by just cleaning them.
	 */

	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
	head = page_buffers(page);
	bh = head;

	/*
	 * Get all the dirty buffers mapped to disk addresses and
	 * handle any aliases from the underlying blockdev's mapping.
	 */
	do {
		if (block > last_block) {
			/*
			 * mapped buffers outside i_size will occur, because
			 * this page can be outside i_size when there is a
			 * truncate in progress.
			 *
			 * The buffer was zeroed by block_write_full_page()
			 */
			clear_buffer_dirty(bh);
			set_buffer_uptodate(bh);
		} else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
			WARN_ON(bh->b_size != blocksize);
			err = get_block(inode, block, bh, 1);
			if (err)
				goto recover;
			if (buffer_new(bh)) {
				/* blockdev mappings never come here */
				clear_buffer_new(bh);
				unmap_underlying_metadata(bh->b_bdev,
							bh->b_blocknr);
			}
		}
		bh = bh->b_this_page;
		block++;
	} while (bh != head);

	do {
		if (!buffer_mapped(bh))
			continue;
		/*
		 * If it's a fully non-blocking write attempt and we cannot
		 * lock the buffer then redirty the page.  Note that this can
		 * potentially cause a busy-wait loop from pdflush and kswapd
		 * activity, but those code paths have their own higher-level
		 * throttling.
		 */
		if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
			lock_buffer(bh);
		} else if (test_set_buffer_locked(bh)) {
			redirty_page_for_writepage(wbc, page);
			continue;
		}
		if (test_clear_buffer_dirty(bh)) {
			mark_buffer_async_write(bh);
		} else {
			unlock_buffer(bh);
		}
	} while ((bh = bh->b_this_page) != head);

	/*
	 * The page and its buffers are protected by PageWriteback(), so we can
	 * drop the bh refcounts early.
	 */
	BUG_ON(PageWriteback(page));
	set_page_writeback(page);

	do {
		struct buffer_head *next = bh->b_this_page;
		if (buffer_async_write(bh)) {
			submit_bh(WRITE, bh);
			nr_underway++;
		}
		bh = next;
	} while (bh != head);
	unlock_page(page);

	err = 0;
done:
	if (nr_underway == 0) {
		/*
		 * The page was marked dirty, but the buffers were
		 * clean.  Someone wrote them back by hand with
		 * ll_rw_block/submit_bh.  A rare case.
		 */
		end_page_writeback(page);

		/*
		 * The page and buffer_heads can be released at any time from
		 * here on.
		 */
	}
	return err;

recover:
	/*
	 * ENOSPC, or some other error.  We may already have added some
	 * blocks to the file, so we need to write these out to avoid
	 * exposing stale data.
	 * The page is currently locked and not marked for writeback
	 */
	bh = head;
	/* Recovery: lock and submit the mapped buffers */
	do {
		if (buffer_mapped(bh) && buffer_dirty(bh)) {
			lock_buffer(bh);
			mark_buffer_async_write(bh);
		} else {
			/*
			 * The buffer may have been set dirty during
			 * attachment to a dirty page.
			 */
			clear_buffer_dirty(bh);
		}
	} while ((bh = bh->b_this_page) != head);
	SetPageError(page);
	BUG_ON(PageWriteback(page));
	mapping_set_error(page->mapping, err);
	set_page_writeback(page);
	do {
		struct buffer_head *next = bh->b_this_page;
		if (buffer_async_write(bh)) {
			clear_buffer_dirty(bh);
			submit_bh(WRITE, bh);
			nr_underway++;
		}
		bh = next;
	} while (bh != head);
	unlock_page(page);
	goto done;
}

/*
 * If a page has any new buffers, zero them out here, and mark them uptodate
 * and dirty so they'll be written out (in order to prevent uninitialised
 * block data from leaking). And clear the new bit.
 */
void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
{
	unsigned int block_start, block_end;
	struct buffer_head *head, *bh;

	BUG_ON(!PageLocked(page));
	if (!page_has_buffers(page))
		return;

	bh = head = page_buffers(page);
	block_start = 0;
	do {
		block_end = block_start + bh->b_size;

		if (buffer_new(bh)) {
			if (block_end > from && block_start < to) {
				if (!PageUptodate(page)) {
					unsigned start, size;

					start = max(from, block_start);
					size = min(to, block_end) - start;

					zero_user(page, start, size);
					set_buffer_uptodate(bh);
				}

				clear_buffer_new(bh);
				mark_buffer_dirty(bh);
			}
		}

		block_start = block_end;
		bh = bh->b_this_page;
	} while (bh != head);
}
EXPORT_SYMBOL(page_zero_new_buffers);

static int __block_prepare_write(struct inode *inode, struct page *page,
		unsigned from, unsigned to, get_block_t *get_block)
{
	unsigned block_start, block_end;
	sector_t block;
	int err = 0;
	unsigned blocksize, bbits;
	struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;

	BUG_ON(!PageLocked(page));
	BUG_ON(from > PAGE_CACHE_SIZE);
	BUG_ON(to > PAGE_CACHE_SIZE);
	BUG_ON(from > to);

	blocksize = 1 << inode->i_blkbits;
	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);
	head = page_buffers(page);

	bbits = inode->i_blkbits;
	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);

	for(bh = head, block_start = 0; bh != head || !block_start;
	    block++, block_start=block_end, bh = bh->b_this_page) {
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (PageUptodate(page)) {
				if (!buffer_uptodate(bh))
					set_buffer_uptodate(bh);
			}
			continue;
		}
		if (buffer_new(bh))
			clear_buffer_new(bh);
		if (!buffer_mapped(bh)) {
			WARN_ON(bh->b_size != blocksize);
			err = get_block(inode, block, bh, 1);
			if (err)
				break;
			if (buffer_new(bh)) {
				unmap_underlying_metadata(bh->b_bdev,
							bh->b_blocknr);
				if (PageUptodate(page)) {
					clear_buffer_new(bh);
					set_buffer_uptodate(bh);
					mark_buffer_dirty(bh);
					continue;
				}
				if (block_end > to || block_start < from)
					zero_user_segments(page,
						to, block_end,
						block_start, from);
				continue;
			}
		}
		if (PageUptodate(page)) {
			if (!buffer_uptodate(bh))
				set_buffer_uptodate(bh);
			continue;
		}
		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
		    !buffer_unwritten(bh) &&
		     (block_start < from || block_end > to)) {
			ll_rw_block(READ, 1, &bh);
			*wait_bh++=bh;
		}
	}
	/*
	 * If we issued read requests - let them complete.
	 */
	while(wait_bh > wait) {
		wait_on_buffer(*--wait_bh);
		if (!buffer_uptodate(*wait_bh))
			err = -EIO;
	}
	if (unlikely(err))
		page_zero_new_buffers(page, from, to);
	return err;
}

static int __block_commit_write(struct inode *inode, struct page *page,
		unsigned from, unsigned to)
{
	unsigned block_start, block_end;
	int partial = 0;
	unsigned blocksize;
	struct buffer_head *bh, *head;

	blocksize = 1 << inode->i_blkbits;

	for(bh = head = page_buffers(page), block_start = 0;
	    bh != head || !block_start;
	    block_start=block_end, bh = bh->b_this_page) {
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (!buffer_uptodate(bh))
				partial = 1;
		} else {
			set_buffer_uptodate(bh);
			mark_buffer_dirty(bh);
		}
		clear_buffer_new(bh);
	}

	/*
	 * If this is a partial write which happened to make all buffers
	 * uptodate then we can optimize away a bogus readpage() for
	 * the next read(). Here we 'discover' whether the page went
	 * uptodate as a result of this (potentially partial) write.
	 */
	if (!partial)
		SetPageUptodate(page);
	return 0;
}

/*
 * block_write_begin takes care of the basic task of block allocation and
 * bringing partial write blocks uptodate first.
 *
 * If *pagep is not NULL, then block_write_begin uses the locked page
 * at *pagep rather than allocating its own. In this case, the page will
 * not be unlocked or deallocated on failure.
 */
int block_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata,
			get_block_t *get_block)
{
	struct inode *inode = mapping->host;
	int status = 0;
	struct page *page;
	pgoff_t index;
	unsigned start, end;
	int ownpage = 0;

	index = pos >> PAGE_CACHE_SHIFT;
	start = pos & (PAGE_CACHE_SIZE - 1);
	end = start + len;

	page = *pagep;
	if (page == NULL) {
		ownpage = 1;
		page = __grab_cache_page(mapping, index);
		if (!page) {
			status = -ENOMEM;
			goto out;
		}
		*pagep = page;
	} else
		BUG_ON(!PageLocked(page));

	status = __block_prepare_write(inode, page, start, end, get_block);
	if (unlikely(status)) {
		ClearPageUptodate(page);

		if (ownpage) {
			unlock_page(page);
			page_cache_release(page);
			*pagep = NULL;

			/*
			 * prepare_write() may have instantiated a few blocks
			 * outside i_size.  Trim these off again. Don't need
			 * i_size_read because we hold i_mutex.
			 */
			if (pos + len > inode->i_size)
				vmtruncate(inode, inode->i_size);
		}
		goto out;
	}

out:
	return status;
}
EXPORT_SYMBOL(block_write_begin);

1993 int block_write_end(struct file *file, struct address_space *mapping,
1994 loff_t pos, unsigned len, unsigned copied,
1995 struct page *page, void *fsdata)
1997 struct inode *inode = mapping->host;
1998 unsigned start;
2000 start = pos & (PAGE_CACHE_SIZE - 1);
2002 if (unlikely(copied < len)) {
2004 * The buffers that were written will now be uptodate, so we
2005 * don't have to worry about a readpage reading them and
2006 * overwriting a partial write. However if we have encountered
2007 * a short write and only partially written into a buffer, it
2008 * will not be marked uptodate, so a readpage might come in and
2009 * destroy our partial write.
2011 * Do the simplest thing, and just treat any short write to a
2012  * non-uptodate page as a zero-length write, and force the
2013 * caller to redo the whole thing.
2015 if (!PageUptodate(page))
2016 copied = 0;
2018 page_zero_new_buffers(page, start+copied, start+len);
2020 flush_dcache_page(page);
2022 /* This could be a short (even 0-length) commit */
2023 __block_commit_write(inode, page, start, start+copied);
2025 return copied;
2027 EXPORT_SYMBOL(block_write_end);
2029 int generic_write_end(struct file *file, struct address_space *mapping,
2030 loff_t pos, unsigned len, unsigned copied,
2031 struct page *page, void *fsdata)
2033 struct inode *inode = mapping->host;
2035 copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2038  * No need to use i_size_read() here: the i_size
2039 * cannot change under us because we hold i_mutex.
2041 * But it's important to update i_size while still holding page lock:
2042 * page writeout could otherwise come in and zero beyond i_size.
2044 if (pos+copied > inode->i_size) {
2045 i_size_write(inode, pos+copied);
2046 mark_inode_dirty(inode);
2049 unlock_page(page);
2050 page_cache_release(page);
2052 return copied;
2054 EXPORT_SYMBOL(generic_write_end);
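/*
 * Illustrative sketch (not part of this file): how a simple filesystem
 * might wire the helpers above into its address_space_operations write
 * path. myfs_get_block and the other myfs_* names are hypothetical; a
 * real filesystem supplies its own get_block_t.
 */
static int myfs_get_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create);

static int myfs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	*pagep = NULL;
	return block_write_begin(file, mapping, pos, len, flags,
					pagep, fsdata, myfs_get_block);
}

static const struct address_space_operations myfs_aops = {
	.write_begin	= myfs_write_begin,
	.write_end	= generic_write_end,
	/* ... readpage, writepage etc. ... */
};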
2057 * Generic "read page" function for block devices that have the normal
2058  * get_block functionality. This covers most of the block-device-backed filesystems.
2059 * Reads the page asynchronously --- the unlock_buffer() and
2060 * set/clear_buffer_uptodate() functions propagate buffer state into the
2061 * page struct once IO has completed.
2063 int block_read_full_page(struct page *page, get_block_t *get_block)
2065 struct inode *inode = page->mapping->host;
2066 sector_t iblock, lblock;
2067 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2068 unsigned int blocksize;
2069 int nr, i;
2070 int fully_mapped = 1;
2072 BUG_ON(!PageLocked(page));
2073 blocksize = 1 << inode->i_blkbits;
2074 if (!page_has_buffers(page))
2075 create_empty_buffers(page, blocksize, 0);
2076 head = page_buffers(page);
2078 iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2079 lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2080 bh = head;
2081 nr = 0;
2082 i = 0;
2084 do {
2085 if (buffer_uptodate(bh))
2086 continue;
2088 if (!buffer_mapped(bh)) {
2089 int err = 0;
2091 fully_mapped = 0;
2092 if (iblock < lblock) {
2093 WARN_ON(bh->b_size != blocksize);
2094 err = get_block(inode, iblock, bh, 0);
2095 if (err)
2096 SetPageError(page);
2098 if (!buffer_mapped(bh)) {
2099 zero_user(page, i * blocksize, blocksize);
2100 if (!err)
2101 set_buffer_uptodate(bh);
2102 continue;
2105 * get_block() might have updated the buffer
2106 * synchronously
2108 if (buffer_uptodate(bh))
2109 continue;
2111 arr[nr++] = bh;
2112 } while (i++, iblock++, (bh = bh->b_this_page) != head);
2114 if (fully_mapped)
2115 SetPageMappedToDisk(page);
2117 if (!nr) {
2119 * All buffers are uptodate - we can set the page uptodate
2120 * as well. But not if get_block() returned an error.
2122 if (!PageError(page))
2123 SetPageUptodate(page);
2124 unlock_page(page);
2125 return 0;
2128 /* Stage two: lock the buffers */
2129 for (i = 0; i < nr; i++) {
2130 bh = arr[i];
2131 lock_buffer(bh);
2132 mark_buffer_async_read(bh);
2136 * Stage 3: start the IO. Check for uptodateness
2137 * inside the buffer lock in case another process reading
2138 * the underlying blockdev brought it uptodate (the sct fix).
2140 for (i = 0; i < nr; i++) {
2141 bh = arr[i];
2142 if (buffer_uptodate(bh))
2143 end_buffer_async_read(bh, 1);
2144 else
2145 submit_bh(READ, bh);
2147 return 0;
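/*
 * Illustrative sketch (not part of this file): block_read_full_page() is
 * normally used via a one-line ->readpage wrapper; myfs_get_block is the
 * hypothetical get_block_t assumed in the sketches in this file.
 */
static int myfs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, myfs_get_block);
}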
2150 /* utility function for filesystems that need to do work on expanding
2151 * truncates. Uses filesystem pagecache writes to allow the filesystem to
2152 * deal with the hole.
2154 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2156 struct address_space *mapping = inode->i_mapping;
2157 struct page *page;
2158 void *fsdata;
2159 unsigned long limit;
2160 int err;
2162 err = -EFBIG;
2163 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
2164 if (limit != RLIM_INFINITY && size > (loff_t)limit) {
2165 send_sig(SIGXFSZ, current, 0);
2166 goto out;
2168 if (size > inode->i_sb->s_maxbytes)
2169 goto out;
2171 err = pagecache_write_begin(NULL, mapping, size, 0,
2172 AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
2173 &page, &fsdata);
2174 if (err)
2175 goto out;
2177 err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
2178 BUG_ON(err > 0);
2180 out:
2181 return err;
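/*
 * Illustrative sketch (not part of this file): a filesystem might call
 * generic_cont_expand_simple() from its ->setattr when a truncate grows
 * the file, so the new tail is instantiated and zeroed via the pagecache.
 * myfs_setattr is hypothetical.
 */
static int myfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int err;

	if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size > inode->i_size) {
		err = generic_cont_expand_simple(inode, attr->ia_size);
		if (err)
			return err;
	}
	return inode_setattr(inode, attr);
}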
2184 int cont_expand_zero(struct file *file, struct address_space *mapping,
2185 loff_t pos, loff_t *bytes)
2187 struct inode *inode = mapping->host;
2188 unsigned blocksize = 1 << inode->i_blkbits;
2189 struct page *page;
2190 void *fsdata;
2191 pgoff_t index, curidx;
2192 loff_t curpos;
2193 unsigned zerofrom, offset, len;
2194 int err = 0;
2196 index = pos >> PAGE_CACHE_SHIFT;
2197 offset = pos & ~PAGE_CACHE_MASK;
2199 while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
2200 zerofrom = curpos & ~PAGE_CACHE_MASK;
2201 if (zerofrom & (blocksize-1)) {
2202 *bytes |= (blocksize-1);
2203 (*bytes)++;
2205 len = PAGE_CACHE_SIZE - zerofrom;
2207 err = pagecache_write_begin(file, mapping, curpos, len,
2208 AOP_FLAG_UNINTERRUPTIBLE,
2209 &page, &fsdata);
2210 if (err)
2211 goto out;
2212 zero_user(page, zerofrom, len);
2213 err = pagecache_write_end(file, mapping, curpos, len, len,
2214 page, fsdata);
2215 if (err < 0)
2216 goto out;
2217 BUG_ON(err != len);
2218 err = 0;
2221 /* page covers the boundary, find the boundary offset */
2222 if (index == curidx) {
2223 zerofrom = curpos & ~PAGE_CACHE_MASK;
2224 			/* if we are expanding the file, the last block will be zero-filled */
2225 if (offset <= zerofrom) {
2226 goto out;
2228 if (zerofrom & (blocksize-1)) {
2229 *bytes |= (blocksize-1);
2230 (*bytes)++;
2232 len = offset - zerofrom;
2234 err = pagecache_write_begin(file, mapping, curpos, len,
2235 AOP_FLAG_UNINTERRUPTIBLE,
2236 &page, &fsdata);
2237 if (err)
2238 goto out;
2239 zero_user(page, zerofrom, len);
2240 err = pagecache_write_end(file, mapping, curpos, len, len,
2241 page, fsdata);
2242 if (err < 0)
2243 goto out;
2244 BUG_ON(err != len);
2245 err = 0;
2247 out:
2248 return err;
2252  * For moronic filesystems that do not allow holes in files.
2253 * We may have to extend the file.
2255 int cont_write_begin(struct file *file, struct address_space *mapping,
2256 loff_t pos, unsigned len, unsigned flags,
2257 struct page **pagep, void **fsdata,
2258 get_block_t *get_block, loff_t *bytes)
2260 struct inode *inode = mapping->host;
2261 unsigned blocksize = 1 << inode->i_blkbits;
2262 unsigned zerofrom;
2263 int err;
2265 err = cont_expand_zero(file, mapping, pos, bytes);
2266 if (err)
2267 goto out;
2269 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2270 if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2271 *bytes |= (blocksize-1);
2272 (*bytes)++;
2275 *pagep = NULL;
2276 err = block_write_begin(file, mapping, pos, len,
2277 flags, pagep, fsdata, get_block);
2278 out:
2279 return err;
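/*
 * Illustrative sketch (not part of this file): a no-holes filesystem
 * passes cont_write_begin() a pointer to its "allocated so far" byte
 * count (here the hypothetical field myfs_i(inode)->i_allocated_bytes),
 * so everything between the old tail and the write position gets
 * zero-filled first.
 */
static int myfs_cont_write_begin(struct file *file,
		struct address_space *mapping, loff_t pos, unsigned len,
		unsigned flags, struct page **pagep, void **fsdata)
{
	return cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				myfs_get_block,
				&myfs_i(mapping->host)->i_allocated_bytes);
}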
2282 int block_prepare_write(struct page *page, unsigned from, unsigned to,
2283 get_block_t *get_block)
2285 struct inode *inode = page->mapping->host;
2286 int err = __block_prepare_write(inode, page, from, to, get_block);
2287 if (err)
2288 ClearPageUptodate(page);
2289 return err;
2292 int block_commit_write(struct page *page, unsigned from, unsigned to)
2294 struct inode *inode = page->mapping->host;
2295 __block_commit_write(inode,page,from,to);
2296 return 0;
2299 int generic_commit_write(struct file *file, struct page *page,
2300 unsigned from, unsigned to)
2302 struct inode *inode = page->mapping->host;
2303 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2304 __block_commit_write(inode,page,from,to);
2306  * No need to use i_size_read() here: the i_size
2307 * cannot change under us because we hold i_mutex.
2309 if (pos > inode->i_size) {
2310 i_size_write(inode, pos);
2311 mark_inode_dirty(inode);
2313 return 0;
2317 * block_page_mkwrite() is not allowed to change the file size as it gets
2318 * called from a page fault handler when a page is first dirtied. Hence we must
2319 * be careful to check for EOF conditions here. We set the page up correctly
2320 * for a written page which means we get ENOSPC checking when writing into
2321 * holes and correct delalloc and unwritten extent mapping on filesystems that
2322 * support these features.
2324 * We are not allowed to take the i_mutex here so we have to play games to
2325 * protect against truncate races as the page could now be beyond EOF. Because
2326 * vmtruncate() writes the inode size before removing pages, once we have the
2327 * page lock we can determine safely if the page is beyond EOF. If it is not
2328 * beyond EOF, then the page is guaranteed safe against truncation until we
2329 * unlock the page.
2332 block_page_mkwrite(struct vm_area_struct *vma, struct page *page,
2333 get_block_t get_block)
2335 struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
2336 unsigned long end;
2337 loff_t size;
2338 int ret = -EINVAL;
2340 lock_page(page);
2341 size = i_size_read(inode);
2342 if ((page->mapping != inode->i_mapping) ||
2343 (page_offset(page) > size)) {
2344 /* page got truncated out from underneath us */
2345 goto out_unlock;
2348 /* page is wholly or partially inside EOF */
2349 if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
2350 end = size & ~PAGE_CACHE_MASK;
2351 else
2352 end = PAGE_CACHE_SIZE;
2354 ret = block_prepare_write(page, 0, end, get_block);
2355 if (!ret)
2356 ret = block_commit_write(page, 0, end);
2358 out_unlock:
2359 unlock_page(page);
2360 return ret;
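/*
 * Illustrative sketch (not part of this file): block_page_mkwrite() is
 * meant to be called from a filesystem's ->page_mkwrite handler so that
 * blocks are allocated (and ENOSPC reported) at fault time.
 */
static int myfs_page_mkwrite(struct vm_area_struct *vma, struct page *page)
{
	return block_page_mkwrite(vma, page, myfs_get_block);
}

static struct vm_operations_struct myfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= myfs_page_mkwrite,
};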
2364 * nobh_write_begin()'s prereads are special: the buffer_heads are freed
2365 * immediately, while under the page lock. So it needs a special end_io
2366 * handler which does not touch the bh after unlocking it.
2368 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2370 __end_buffer_read_notouch(bh, uptodate);
2374 * Attach the singly-linked list of buffers created by nobh_write_begin, to
2375 * the page (converting it to circular linked list and taking care of page
2376 * dirty races).
2378 static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
2380 struct buffer_head *bh;
2382 BUG_ON(!PageLocked(page));
2384 spin_lock(&page->mapping->private_lock);
2385 bh = head;
2386 do {
2387 if (PageDirty(page))
2388 set_buffer_dirty(bh);
2389 if (!bh->b_this_page)
2390 bh->b_this_page = head;
2391 bh = bh->b_this_page;
2392 } while (bh != head);
2393 attach_page_buffers(page, head);
2394 spin_unlock(&page->mapping->private_lock);
2398  * On entry, the page is not uptodate at all.
2399  * On exit, the page is fully uptodate in the areas outside (from,to)
2401 int nobh_write_begin(struct file *file, struct address_space *mapping,
2402 loff_t pos, unsigned len, unsigned flags,
2403 struct page **pagep, void **fsdata,
2404 get_block_t *get_block)
2406 struct inode *inode = mapping->host;
2407 const unsigned blkbits = inode->i_blkbits;
2408 const unsigned blocksize = 1 << blkbits;
2409 struct buffer_head *head, *bh;
2410 struct page *page;
2411 pgoff_t index;
2412 unsigned from, to;
2413 unsigned block_in_page;
2414 unsigned block_start, block_end;
2415 sector_t block_in_file;
2416 int nr_reads = 0;
2417 int ret = 0;
2418 int is_mapped_to_disk = 1;
2420 index = pos >> PAGE_CACHE_SHIFT;
2421 from = pos & (PAGE_CACHE_SIZE - 1);
2422 to = from + len;
2424 page = __grab_cache_page(mapping, index);
2425 if (!page)
2426 return -ENOMEM;
2427 *pagep = page;
2428 *fsdata = NULL;
2430 if (page_has_buffers(page)) {
2431 unlock_page(page);
2432 page_cache_release(page);
2433 *pagep = NULL;
2434 return block_write_begin(file, mapping, pos, len, flags, pagep,
2435 fsdata, get_block);
2438 if (PageMappedToDisk(page))
2439 return 0;
2442 * Allocate buffers so that we can keep track of state, and potentially
2443 * attach them to the page if an error occurs. In the common case of
2444 * no error, they will just be freed again without ever being attached
2445 * to the page (which is all OK, because we're under the page lock).
2447 * Be careful: the buffer linked list is a NULL terminated one, rather
2448 * than the circular one we're used to.
2450 head = alloc_page_buffers(page, blocksize, 0);
2451 if (!head) {
2452 ret = -ENOMEM;
2453 goto out_release;
2456 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2459 * We loop across all blocks in the page, whether or not they are
2460 * part of the affected region. This is so we can discover if the
2461 * page is fully mapped-to-disk.
2463 for (block_start = 0, block_in_page = 0, bh = head;
2464 block_start < PAGE_CACHE_SIZE;
2465 block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
2466 int create;
2468 block_end = block_start + blocksize;
2469 bh->b_state = 0;
2470 create = 1;
2471 if (block_start >= to)
2472 create = 0;
2473 ret = get_block(inode, block_in_file + block_in_page,
2474 bh, create);
2475 if (ret)
2476 goto failed;
2477 if (!buffer_mapped(bh))
2478 is_mapped_to_disk = 0;
2479 if (buffer_new(bh))
2480 unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
2481 if (PageUptodate(page)) {
2482 set_buffer_uptodate(bh);
2483 continue;
2485 if (buffer_new(bh) || !buffer_mapped(bh)) {
2486 zero_user_segments(page, block_start, from,
2487 to, block_end);
2488 continue;
2490 if (buffer_uptodate(bh))
2491 continue; /* reiserfs does this */
2492 if (block_start < from || block_end > to) {
2493 lock_buffer(bh);
2494 bh->b_end_io = end_buffer_read_nobh;
2495 submit_bh(READ, bh);
2496 nr_reads++;
2500 if (nr_reads) {
2502 * The page is locked, so these buffers are protected from
2503 * any VM or truncate activity. Hence we don't need to care
2504 * for the buffer_head refcounts.
2506 for (bh = head; bh; bh = bh->b_this_page) {
2507 wait_on_buffer(bh);
2508 if (!buffer_uptodate(bh))
2509 ret = -EIO;
2511 if (ret)
2512 goto failed;
2515 if (is_mapped_to_disk)
2516 SetPageMappedToDisk(page);
2518 *fsdata = head; /* to be released by nobh_write_end */
2520 return 0;
2522 failed:
2523 BUG_ON(!ret);
2525 * Error recovery is a bit difficult. We need to zero out blocks that
2526 * were newly allocated, and dirty them to ensure they get written out.
2527 * Buffers need to be attached to the page at this point, otherwise
2528 * the handling of potential IO errors during writeout would be hard
2529 * (could try doing synchronous writeout, but what if that fails too?)
2531 attach_nobh_buffers(page, head);
2532 page_zero_new_buffers(page, from, to);
2534 out_release:
2535 unlock_page(page);
2536 page_cache_release(page);
2537 *pagep = NULL;
2539 if (pos + len > inode->i_size)
2540 vmtruncate(inode, inode->i_size);
2542 return ret;
2544 EXPORT_SYMBOL(nobh_write_begin);
2546 int nobh_write_end(struct file *file, struct address_space *mapping,
2547 loff_t pos, unsigned len, unsigned copied,
2548 struct page *page, void *fsdata)
2550 struct inode *inode = page->mapping->host;
2551 struct buffer_head *head = fsdata;
2552 struct buffer_head *bh;
2554 if (!PageMappedToDisk(page)) {
2555 if (unlikely(copied < len) && !page_has_buffers(page))
2556 attach_nobh_buffers(page, head);
2557 if (page_has_buffers(page))
2558 return generic_write_end(file, mapping, pos, len,
2559 copied, page, fsdata);
2562 SetPageUptodate(page);
2563 set_page_dirty(page);
2564 if (pos+copied > inode->i_size) {
2565 i_size_write(inode, pos+copied);
2566 mark_inode_dirty(inode);
2569 unlock_page(page);
2570 page_cache_release(page);
2572 while (head) {
2573 bh = head;
2574 head = head->b_this_page;
2575 free_buffer_head(bh);
2578 return copied;
2580 EXPORT_SYMBOL(nobh_write_end);
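/*
 * Illustrative sketch (not part of this file): the nobh helpers slot
 * into address_space_operations the same way as the buffer_head based
 * ones; myfs_get_block is the hypothetical get_block_t assumed above.
 */
static int myfs_nobh_write_begin(struct file *file,
		struct address_space *mapping, loff_t pos, unsigned len,
		unsigned flags, struct page **pagep, void **fsdata)
{
	return nobh_write_begin(file, mapping, pos, len, flags,
				pagep, fsdata, myfs_get_block);
}

static const struct address_space_operations myfs_nobh_aops = {
	.write_begin	= myfs_nobh_write_begin,
	.write_end	= nobh_write_end,
	/* ... */
};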
2583  * nobh_writepage() - based on block_write_full_page() except
2584 * that it tries to operate without attaching bufferheads to
2585 * the page.
2587 int nobh_writepage(struct page *page, get_block_t *get_block,
2588 struct writeback_control *wbc)
2590 struct inode * const inode = page->mapping->host;
2591 loff_t i_size = i_size_read(inode);
2592 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2593 unsigned offset;
2594 int ret;
2596 /* Is the page fully inside i_size? */
2597 if (page->index < end_index)
2598 goto out;
2600 /* Is the page fully outside i_size? (truncate in progress) */
2601 offset = i_size & (PAGE_CACHE_SIZE-1);
2602 if (page->index >= end_index+1 || !offset) {
2604 * The page may have dirty, unmapped buffers. For example,
2605 * they may have been added in ext3_writepage(). Make them
2606 * freeable here, so the page does not leak.
2608 #if 0
2609 		/* Not really sure about this - do we need this? */
2610 if (page->mapping->a_ops->invalidatepage)
2611 page->mapping->a_ops->invalidatepage(page, offset);
2612 #endif
2613 unlock_page(page);
2614 return 0; /* don't care */
2618 * The page straddles i_size. It must be zeroed out on each and every
2619 * writepage invocation because it may be mmapped. "A file is mapped
2620 * in multiples of the page size. For a file that is not a multiple of
2621 * the page size, the remaining memory is zeroed when mapped, and
2622 * writes to that region are not written out to the file."
2624 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2625 out:
2626 ret = mpage_writepage(page, get_block, wbc);
2627 if (ret == -EAGAIN)
2628 ret = __block_write_full_page(inode, page, get_block, wbc);
2629 return ret;
2631 EXPORT_SYMBOL(nobh_writepage);
2633 int nobh_truncate_page(struct address_space *mapping,
2634 loff_t from, get_block_t *get_block)
2636 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2637 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2638 unsigned blocksize;
2639 sector_t iblock;
2640 unsigned length, pos;
2641 struct inode *inode = mapping->host;
2642 struct page *page;
2643 struct buffer_head map_bh;
2644 int err;
2646 blocksize = 1 << inode->i_blkbits;
2647 length = offset & (blocksize - 1);
2649 /* Block boundary? Nothing to do */
2650 if (!length)
2651 return 0;
2653 length = blocksize - length;
2654 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2656 page = grab_cache_page(mapping, index);
2657 err = -ENOMEM;
2658 if (!page)
2659 goto out;
2661 if (page_has_buffers(page)) {
2662 has_buffers:
2663 unlock_page(page);
2664 page_cache_release(page);
2665 return block_truncate_page(mapping, from, get_block);
2668 /* Find the buffer that contains "offset" */
2669 pos = blocksize;
2670 while (offset >= pos) {
2671 iblock++;
2672 pos += blocksize;
2675 err = get_block(inode, iblock, &map_bh, 0);
2676 if (err)
2677 goto unlock;
2678 /* unmapped? It's a hole - nothing to do */
2679 if (!buffer_mapped(&map_bh))
2680 goto unlock;
2682 /* Ok, it's mapped. Make sure it's up-to-date */
2683 if (!PageUptodate(page)) {
2684 err = mapping->a_ops->readpage(NULL, page);
2685 if (err) {
2686 page_cache_release(page);
2687 goto out;
2689 lock_page(page);
2690 if (!PageUptodate(page)) {
2691 err = -EIO;
2692 goto unlock;
2694 if (page_has_buffers(page))
2695 goto has_buffers;
2697 zero_user(page, offset, length);
2698 set_page_dirty(page);
2699 err = 0;
2701 unlock:
2702 unlock_page(page);
2703 page_cache_release(page);
2704 out:
2705 return err;
2707 EXPORT_SYMBOL(nobh_truncate_page);
2709 int block_truncate_page(struct address_space *mapping,
2710 loff_t from, get_block_t *get_block)
2712 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2713 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2714 unsigned blocksize;
2715 sector_t iblock;
2716 unsigned length, pos;
2717 struct inode *inode = mapping->host;
2718 struct page *page;
2719 struct buffer_head *bh;
2720 int err;
2722 blocksize = 1 << inode->i_blkbits;
2723 length = offset & (blocksize - 1);
2725 /* Block boundary? Nothing to do */
2726 if (!length)
2727 return 0;
2729 length = blocksize - length;
2730 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2732 page = grab_cache_page(mapping, index);
2733 err = -ENOMEM;
2734 if (!page)
2735 goto out;
2737 if (!page_has_buffers(page))
2738 create_empty_buffers(page, blocksize, 0);
2740 /* Find the buffer that contains "offset" */
2741 bh = page_buffers(page);
2742 pos = blocksize;
2743 while (offset >= pos) {
2744 bh = bh->b_this_page;
2745 iblock++;
2746 pos += blocksize;
2749 err = 0;
2750 if (!buffer_mapped(bh)) {
2751 WARN_ON(bh->b_size != blocksize);
2752 err = get_block(inode, iblock, bh, 0);
2753 if (err)
2754 goto unlock;
2755 /* unmapped? It's a hole - nothing to do */
2756 if (!buffer_mapped(bh))
2757 goto unlock;
2760 /* Ok, it's mapped. Make sure it's up-to-date */
2761 if (PageUptodate(page))
2762 set_buffer_uptodate(bh);
2764 if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2765 err = -EIO;
2766 ll_rw_block(READ, 1, &bh);
2767 wait_on_buffer(bh);
2768 /* Uhhuh. Read error. Complain and punt. */
2769 if (!buffer_uptodate(bh))
2770 goto unlock;
2773 zero_user(page, offset, length);
2774 mark_buffer_dirty(bh);
2775 err = 0;
2777 unlock:
2778 unlock_page(page);
2779 page_cache_release(page);
2780 out:
2781 return err;
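/*
 * Illustrative sketch (not part of this file): a truncate path typically
 * zeroes the tail of the final partial block with block_truncate_page()
 * before freeing the on-disk blocks beyond the new size.
 */
static void myfs_truncate(struct inode *inode)
{
	block_truncate_page(inode->i_mapping, inode->i_size, myfs_get_block);
	/* ... now release the blocks past the (already updated) i_size ... */
}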
2785 * The generic ->writepage function for buffer-backed address_spaces
2787 int block_write_full_page(struct page *page, get_block_t *get_block,
2788 struct writeback_control *wbc)
2790 struct inode * const inode = page->mapping->host;
2791 loff_t i_size = i_size_read(inode);
2792 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2793 unsigned offset;
2795 /* Is the page fully inside i_size? */
2796 if (page->index < end_index)
2797 return __block_write_full_page(inode, page, get_block, wbc);
2799 /* Is the page fully outside i_size? (truncate in progress) */
2800 offset = i_size & (PAGE_CACHE_SIZE-1);
2801 if (page->index >= end_index+1 || !offset) {
2803 * The page may have dirty, unmapped buffers. For example,
2804 * they may have been added in ext3_writepage(). Make them
2805 * freeable here, so the page does not leak.
2807 do_invalidatepage(page, 0);
2808 unlock_page(page);
2809 return 0; /* don't care */
2813 * The page straddles i_size. It must be zeroed out on each and every
2814  * writepage invocation because it may be mmapped. "A file is mapped
2815 * in multiples of the page size. For a file that is not a multiple of
2816 * the page size, the remaining memory is zeroed when mapped, and
2817 * writes to that region are not written out to the file."
2819 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2820 return __block_write_full_page(inode, page, get_block, wbc);
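/*
 * Illustrative sketch (not part of this file): like ->readpage above,
 * ->writepage is usually a one-line wrapper around this helper.
 */
static int myfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, myfs_get_block, wbc);
}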
2823 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2824 get_block_t *get_block)
2826 struct buffer_head tmp;
2827 struct inode *inode = mapping->host;
2828 tmp.b_state = 0;
2829 tmp.b_blocknr = 0;
2830 tmp.b_size = 1 << inode->i_blkbits;
2831 get_block(inode, block, &tmp, 0);
2832 return tmp.b_blocknr;
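/*
 * Illustrative sketch (not part of this file): ->bmap, as used by the
 * FIBMAP ioctl and swapfile setup, is normally just this thin wrapper.
 */
static sector_t myfs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, myfs_get_block);
}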
2835 static void end_bio_bh_io_sync(struct bio *bio, int err)
2837 struct buffer_head *bh = bio->bi_private;
2839 if (err == -EOPNOTSUPP) {
2840 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2841 set_bit(BH_Eopnotsupp, &bh->b_state);
2844 bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2845 bio_put(bio);
2848 int submit_bh(int rw, struct buffer_head * bh)
2850 struct bio *bio;
2851 int ret = 0;
2853 BUG_ON(!buffer_locked(bh));
2854 BUG_ON(!buffer_mapped(bh));
2855 BUG_ON(!bh->b_end_io);
2857 if (buffer_ordered(bh) && (rw == WRITE))
2858 rw = WRITE_BARRIER;
2861 	 * Only clear out a write error when rewriting; should this
2862 * include WRITE_SYNC as well?
2864 if (test_set_buffer_req(bh) && (rw == WRITE || rw == WRITE_BARRIER))
2865 clear_buffer_write_io_error(bh);
2868 * from here on down, it's all bio -- do the initial mapping,
2869 * submit_bio -> generic_make_request may further map this bio around
2871 bio = bio_alloc(GFP_NOIO, 1);
2873 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2874 bio->bi_bdev = bh->b_bdev;
2875 bio->bi_io_vec[0].bv_page = bh->b_page;
2876 bio->bi_io_vec[0].bv_len = bh->b_size;
2877 bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2879 bio->bi_vcnt = 1;
2880 bio->bi_idx = 0;
2881 bio->bi_size = bh->b_size;
2883 bio->bi_end_io = end_bio_bh_io_sync;
2884 bio->bi_private = bh;
2886 bio_get(bio);
2887 submit_bio(rw, bio);
2889 if (bio_flagged(bio, BIO_EOPNOTSUPP))
2890 ret = -EOPNOTSUPP;
2892 bio_put(bio);
2893 return ret;
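/*
 * Illustrative sketch (not part of this file): the canonical pattern for
 * reading a single block synchronously with submit_bh(), essentially
 * what __bread() does internally.
 */
static struct buffer_head *example_bread(struct block_device *bdev,
					sector_t block, unsigned size)
{
	struct buffer_head *bh = __getblk(bdev, block, size);

	if (bh && !buffer_uptodate(bh)) {
		lock_buffer(bh);
		if (!buffer_uptodate(bh)) {
			get_bh(bh);
			bh->b_end_io = end_buffer_read_sync;
			submit_bh(READ, bh);
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh)) {
				brelse(bh);
				return NULL;
			}
		} else
			unlock_buffer(bh);
	}
	return bh;
}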
2897 * ll_rw_block: low-level access to block devices (DEPRECATED)
2898  * @rw: whether to %READ, %WRITE, %SWRITE or %READA (readahead)
2899 * @nr: number of &struct buffer_heads in the array
2900 * @bhs: array of pointers to &struct buffer_head
2902 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2903  * requests an I/O operation on them, either a %READ or a %WRITE. A third
2904  * option, %SWRITE, is like %WRITE except that it makes sure the *current*
2905  * data in the buffers is sent to disk. The fourth option, %READA, is
2906  * described in the documentation for generic_make_request(), which ll_rw_block() calls.
2908 * This function drops any buffer that it cannot get a lock on (with the
2909 * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
2910 * clean when doing a write request, and any buffer that appears to be
2911  * up-to-date when doing a read request. Further, it marks as clean buffers that
2912 * are processed for writing (the buffer cache won't assume that they are
2913 * actually clean until the buffer gets unlocked).
2915  * ll_rw_block sets b_end_io to a simple completion handler that marks
2916  * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
2917 * any waiters.
2919 * All of the buffers must be for the same device, and must also be a
2920 * multiple of the current approved size for the device.
2922 void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2924 int i;
2926 for (i = 0; i < nr; i++) {
2927 struct buffer_head *bh = bhs[i];
2929 if (rw == SWRITE)
2930 lock_buffer(bh);
2931 else if (test_set_buffer_locked(bh))
2932 continue;
2934 if (rw == WRITE || rw == SWRITE) {
2935 if (test_clear_buffer_dirty(bh)) {
2936 bh->b_end_io = end_buffer_write_sync;
2937 get_bh(bh);
2938 submit_bh(WRITE, bh);
2939 continue;
2941 } else {
2942 if (!buffer_uptodate(bh)) {
2943 bh->b_end_io = end_buffer_read_sync;
2944 get_bh(bh);
2945 submit_bh(rw, bh);
2946 continue;
2949 unlock_buffer(bh);
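/*
 * Illustrative sketch (not part of this file): a common ll_rw_block()
 * use is to batch-start reads on several buffers and wait only for the
 * one that is needed immediately, leaving the rest as readahead.
 */
static int example_read_with_readahead(struct buffer_head *wanted,
					struct buffer_head *ahead)
{
	struct buffer_head *bhs[2] = { wanted, ahead };

	ll_rw_block(READ, 2, bhs);
	wait_on_buffer(wanted);
	if (!buffer_uptodate(wanted))
		return -EIO;
	return 0;
}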
2954 * For a data-integrity writeout, we need to wait upon any in-progress I/O
2955 * and then start new I/O and then wait upon it. The caller must have a ref on
2956 * the buffer_head.
2958 int sync_dirty_buffer(struct buffer_head *bh)
2960 int ret = 0;
2962 WARN_ON(atomic_read(&bh->b_count) < 1);
2963 lock_buffer(bh);
2964 if (test_clear_buffer_dirty(bh)) {
2965 get_bh(bh);
2966 bh->b_end_io = end_buffer_write_sync;
2967 ret = submit_bh(WRITE, bh);
2968 wait_on_buffer(bh);
2969 if (buffer_eopnotsupp(bh)) {
2970 clear_buffer_eopnotsupp(bh);
2971 ret = -EOPNOTSUPP;
2973 if (!ret && !buffer_uptodate(bh))
2974 ret = -EIO;
2975 } else {
2976 unlock_buffer(bh);
2978 return ret;
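/*
 * Illustrative sketch (not part of this file): a data-integrity update
 * of one metadata block - read it, modify it, then push it to disk with
 * sync_dirty_buffer() before declaring the operation complete.
 */
static int example_update_block(struct super_block *sb, sector_t block,
				unsigned offset, u8 value)
{
	struct buffer_head *bh = sb_bread(sb, block);
	int err;

	if (!bh)
		return -EIO;
	((u8 *)bh->b_data)[offset] = value;
	mark_buffer_dirty(bh);
	err = sync_dirty_buffer(bh);
	brelse(bh);
	return err;
}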
2982 * try_to_free_buffers() checks if all the buffers on this particular page
2983 * are unused, and releases them if so.
2985 * Exclusion against try_to_free_buffers may be obtained by either
2986 * locking the page or by holding its mapping's private_lock.
2988 * If the page is dirty but all the buffers are clean then we need to
2989 * be sure to mark the page clean as well. This is because the page
2990 * may be against a block device, and a later reattachment of buffers
2991  * to a dirty page will set *all* buffers dirty, which would corrupt
2992 * filesystem data on the same device.
2994 * The same applies to regular filesystem pages: if all the buffers are
2995 * clean then we set the page clean and proceed. To do that, we require
2996 * total exclusion from __set_page_dirty_buffers(). That is obtained with
2997 * private_lock.
2999 * try_to_free_buffers() is non-blocking.
3001 static inline int buffer_busy(struct buffer_head *bh)
3003 return atomic_read(&bh->b_count) |
3004 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
3007 static int
3008 drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
3010 struct buffer_head *head = page_buffers(page);
3011 struct buffer_head *bh;
3013 bh = head;
3014 do {
3015 if (buffer_write_io_error(bh) && page->mapping)
3016 set_bit(AS_EIO, &page->mapping->flags);
3017 if (buffer_busy(bh))
3018 goto failed;
3019 bh = bh->b_this_page;
3020 } while (bh != head);
3022 do {
3023 struct buffer_head *next = bh->b_this_page;
3025 if (!list_empty(&bh->b_assoc_buffers))
3026 __remove_assoc_queue(bh);
3027 bh = next;
3028 } while (bh != head);
3029 *buffers_to_free = head;
3030 __clear_page_buffers(page);
3031 return 1;
3032 failed:
3033 return 0;
3036 int try_to_free_buffers(struct page *page)
3038 struct address_space * const mapping = page->mapping;
3039 struct buffer_head *buffers_to_free = NULL;
3040 int ret = 0;
3042 BUG_ON(!PageLocked(page));
3043 if (PageWriteback(page))
3044 return 0;
3046 if (mapping == NULL) { /* can this still happen? */
3047 ret = drop_buffers(page, &buffers_to_free);
3048 goto out;
3051 spin_lock(&mapping->private_lock);
3052 ret = drop_buffers(page, &buffers_to_free);
3055 * If the filesystem writes its buffers by hand (eg ext3)
3056 * then we can have clean buffers against a dirty page. We
3057 * clean the page here; otherwise the VM will never notice
3058 * that the filesystem did any IO at all.
3060 * Also, during truncate, discard_buffer will have marked all
3061 * the page's buffers clean. We discover that here and clean
3062 * the page also.
3064 * private_lock must be held over this entire operation in order
3065 * to synchronise against __set_page_dirty_buffers and prevent the
3066 * dirty bit from being lost.
3068 if (ret)
3069 cancel_dirty_page(page, PAGE_CACHE_SIZE);
3070 spin_unlock(&mapping->private_lock);
3071 out:
3072 if (buffers_to_free) {
3073 struct buffer_head *bh = buffers_to_free;
3075 do {
3076 struct buffer_head *next = bh->b_this_page;
3077 free_buffer_head(bh);
3078 bh = next;
3079 } while (bh != buffers_to_free);
3081 return ret;
3083 EXPORT_SYMBOL(try_to_free_buffers);
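/*
 * Illustrative sketch (not part of this file): filesystems without
 * special buffer lifetime rules can point ->releasepage straight at
 * try_to_free_buffers(); journalling filesystems wrap it with their
 * own pinned-buffer checks first.
 */
static int myfs_releasepage(struct page *page, gfp_t gfp_mask)
{
	/* nothing of ours pins these buffers, so just try to drop them */
	return try_to_free_buffers(page);
}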
3085 void block_sync_page(struct page *page)
3087 struct address_space *mapping;
3089 smp_mb();
3090 mapping = page_mapping(page);
3091 if (mapping)
3092 blk_run_backing_dev(mapping->backing_dev_info, page);
3096 * There are no bdflush tunables left. But distributions are
3097 * still running obsolete flush daemons, so we terminate them here.
3099 * Use of bdflush() is deprecated and will be removed in a future kernel.
3100 * The `pdflush' kernel threads fully replace bdflush daemons and this call.
3102 asmlinkage long sys_bdflush(int func, long data)
3104 static int msg_count;
3106 if (!capable(CAP_SYS_ADMIN))
3107 return -EPERM;
3109 if (msg_count < 5) {
3110 msg_count++;
3111 printk(KERN_INFO
3112 "warning: process `%s' used the obsolete bdflush"
3113 " system call\n", current->comm);
3114 printk(KERN_INFO "Fix your initscripts?\n");
3117 if (func == 1)
3118 do_exit(0);
3119 return 0;
3123 * Buffer-head allocation
3125 static struct kmem_cache *bh_cachep;
3128 * Once the number of bh's in the machine exceeds this level, we start
3129 * stripping them in writeback.
3131 static int max_buffer_heads;
3133 int buffer_heads_over_limit;
3135 struct bh_accounting {
3136 int nr; /* Number of live bh's */
3137 int ratelimit; /* Limit cacheline bouncing */
3140 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3142 static void recalc_bh_state(void)
3144 int i;
3145 int tot = 0;
3147 if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
3148 return;
3149 __get_cpu_var(bh_accounting).ratelimit = 0;
3150 for_each_online_cpu(i)
3151 tot += per_cpu(bh_accounting, i).nr;
3152 buffer_heads_over_limit = (tot > max_buffer_heads);
3155 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
3157 struct buffer_head *ret = kmem_cache_alloc(bh_cachep,
3158 set_migrateflags(gfp_flags, __GFP_RECLAIMABLE));
3159 if (ret) {
3160 INIT_LIST_HEAD(&ret->b_assoc_buffers);
3161 get_cpu_var(bh_accounting).nr++;
3162 recalc_bh_state();
3163 put_cpu_var(bh_accounting);
3165 return ret;
3167 EXPORT_SYMBOL(alloc_buffer_head);
3169 void free_buffer_head(struct buffer_head *bh)
3171 BUG_ON(!list_empty(&bh->b_assoc_buffers));
3172 kmem_cache_free(bh_cachep, bh);
3173 get_cpu_var(bh_accounting).nr--;
3174 recalc_bh_state();
3175 put_cpu_var(bh_accounting);
3177 EXPORT_SYMBOL(free_buffer_head);
3179 static void buffer_exit_cpu(int cpu)
3181 int i;
3182 struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3184 for (i = 0; i < BH_LRU_SIZE; i++) {
3185 brelse(b->bhs[i]);
3186 b->bhs[i] = NULL;
3188 get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
3189 per_cpu(bh_accounting, cpu).nr = 0;
3190 put_cpu_var(bh_accounting);
3193 static int buffer_cpu_notify(struct notifier_block *self,
3194 unsigned long action, void *hcpu)
3196 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
3197 buffer_exit_cpu((unsigned long)hcpu);
3198 return NOTIFY_OK;
3202 * bh_uptodate_or_lock: Test whether the buffer is uptodate
3203 * @bh: struct buffer_head
3205  * Return true if the buffer is up-to-date; otherwise return false
3206  * with the buffer locked.
3208 int bh_uptodate_or_lock(struct buffer_head *bh)
3210 if (!buffer_uptodate(bh)) {
3211 lock_buffer(bh);
3212 if (!buffer_uptodate(bh))
3213 return 0;
3214 unlock_buffer(bh);
3216 return 1;
3218 EXPORT_SYMBOL(bh_uptodate_or_lock);
3221 * bh_submit_read: Submit a locked buffer for reading
3222 * @bh: struct buffer_head
3224 * Returns zero on success and -EIO on error.
3226 int bh_submit_read(struct buffer_head *bh)
3228 BUG_ON(!buffer_locked(bh));
3230 if (buffer_uptodate(bh)) {
3231 unlock_buffer(bh);
3232 return 0;
3235 get_bh(bh);
3236 bh->b_end_io = end_buffer_read_sync;
3237 submit_bh(READ, bh);
3238 wait_on_buffer(bh);
3239 if (buffer_uptodate(bh))
3240 return 0;
3241 return -EIO;
3243 EXPORT_SYMBOL(bh_submit_read);
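/*
 * Illustrative sketch (not part of this file): bh_uptodate_or_lock() and
 * bh_submit_read() are designed as a pair - the former returns 0 with
 * the buffer locked exactly when the latter may be called.
 */
static int example_read_locked(struct buffer_head *bh)
{
	if (bh_uptodate_or_lock(bh))
		return 0;		/* already uptodate, nothing to do */
	return bh_submit_read(bh);	/* consumes the lock, -EIO on error */
}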
3245 static void
3246 init_buffer_head(struct kmem_cache *cachep, void *data)
3248 struct buffer_head *bh = data;
3250 memset(bh, 0, sizeof(*bh));
3251 INIT_LIST_HEAD(&bh->b_assoc_buffers);
3254 void __init buffer_init(void)
3256 int nrpages;
3258 bh_cachep = kmem_cache_create("buffer_head",
3259 sizeof(struct buffer_head), 0,
3260 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3261 SLAB_MEM_SPREAD),
3262 init_buffer_head);
3265 * Limit the bh occupancy to 10% of ZONE_NORMAL
3267 nrpages = (nr_free_buffer_pages() * 10) / 100;
3268 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3269 hotcpu_notifier(buffer_cpu_notify, 0);
3272 EXPORT_SYMBOL(__bforget);
3273 EXPORT_SYMBOL(__brelse);
3274 EXPORT_SYMBOL(__wait_on_buffer);
3275 EXPORT_SYMBOL(block_commit_write);
3276 EXPORT_SYMBOL(block_prepare_write);
3277 EXPORT_SYMBOL(block_page_mkwrite);
3278 EXPORT_SYMBOL(block_read_full_page);
3279 EXPORT_SYMBOL(block_sync_page);
3280 EXPORT_SYMBOL(block_truncate_page);
3281 EXPORT_SYMBOL(block_write_full_page);
3282 EXPORT_SYMBOL(cont_write_begin);
3283 EXPORT_SYMBOL(end_buffer_read_sync);
3284 EXPORT_SYMBOL(end_buffer_write_sync);
3285 EXPORT_SYMBOL(file_fsync);
3286 EXPORT_SYMBOL(fsync_bdev);
3287 EXPORT_SYMBOL(generic_block_bmap);
3288 EXPORT_SYMBOL(generic_commit_write);
3289 EXPORT_SYMBOL(generic_cont_expand_simple);
3290 EXPORT_SYMBOL(init_buffer);
3291 EXPORT_SYMBOL(invalidate_bdev);
3292 EXPORT_SYMBOL(ll_rw_block);
3293 EXPORT_SYMBOL(mark_buffer_dirty);
3294 EXPORT_SYMBOL(submit_bh);
3295 EXPORT_SYMBOL(sync_dirty_buffer);
3296 EXPORT_SYMBOL(unlock_buffer);