Merge branch 'x86-vdso-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / fs / buffer.c
blob698c6b2cc462ab067debcab84f77cffcb785a9cd
1 /*
2 * linux/fs/buffer.c
4 * Copyright (C) 1991, 1992, 2002 Linus Torvalds
5 */
7 /*
8 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
10 * Removed a lot of unnecessary code and simplified things now that
11 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
13 * Speed up hash, lru, and free list operations. Use gfp() for allocating
14 * hash table, use SLAB cache for buffer heads. SMP threading. -DaveM
16 * Added 32k buffer block sizes - these are required older ARM systems. - RMK
18 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
21 #include <linux/kernel.h>
22 #include <linux/syscalls.h>
23 #include <linux/fs.h>
24 #include <linux/mm.h>
25 #include <linux/percpu.h>
26 #include <linux/slab.h>
27 #include <linux/capability.h>
28 #include <linux/blkdev.h>
29 #include <linux/file.h>
30 #include <linux/quotaops.h>
31 #include <linux/highmem.h>
32 #include <linux/module.h>
33 #include <linux/writeback.h>
34 #include <linux/hash.h>
35 #include <linux/suspend.h>
36 #include <linux/buffer_head.h>
37 #include <linux/task_io_accounting_ops.h>
38 #include <linux/bio.h>
39 #include <linux/notifier.h>
40 #include <linux/cpu.h>
41 #include <linux/bitops.h>
42 #include <linux/mpage.h>
43 #include <linux/bit_spinlock.h>
44 #include <linux/cleancache.h>
46 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
48 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
50 inline void
51 init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
53 bh->b_end_io = handler;
54 bh->b_private = private;
56 EXPORT_SYMBOL(init_buffer);
58 static int sleep_on_buffer(void *word)
60 io_schedule();
61 return 0;
64 void __lock_buffer(struct buffer_head *bh)
66 wait_on_bit_lock(&bh->b_state, BH_Lock, sleep_on_buffer,
67 TASK_UNINTERRUPTIBLE);
69 EXPORT_SYMBOL(__lock_buffer);
71 void unlock_buffer(struct buffer_head *bh)
73 clear_bit_unlock(BH_Lock, &bh->b_state);
74 smp_mb__after_clear_bit();
75 wake_up_bit(&bh->b_state, BH_Lock);
77 EXPORT_SYMBOL(unlock_buffer);
80 * Block until a buffer comes unlocked. This doesn't stop it
81 * from becoming locked again - you have to lock it yourself
82 * if you want to preserve its state.
84 void __wait_on_buffer(struct buffer_head * bh)
86 wait_on_bit(&bh->b_state, BH_Lock, sleep_on_buffer, TASK_UNINTERRUPTIBLE);
88 EXPORT_SYMBOL(__wait_on_buffer);
90 static void
91 __clear_page_buffers(struct page *page)
93 ClearPagePrivate(page);
94 set_page_private(page, 0);
95 page_cache_release(page);
99 static int quiet_error(struct buffer_head *bh)
101 if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit())
102 return 0;
103 return 1;
107 static void buffer_io_error(struct buffer_head *bh)
109 char b[BDEVNAME_SIZE];
110 printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
111 bdevname(bh->b_bdev, b),
112 (unsigned long long)bh->b_blocknr);
116 * End-of-IO handler helper function which does not touch the bh after
117 * unlocking it.
118 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
119 * a race there is benign: unlock_buffer() only use the bh's address for
120 * hashing after unlocking the buffer, so it doesn't actually touch the bh
121 * itself.
123 static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
125 if (uptodate) {
126 set_buffer_uptodate(bh);
127 } else {
128 /* This happens, due to failed READA attempts. */
129 clear_buffer_uptodate(bh);
131 unlock_buffer(bh);
135 * Default synchronous end-of-IO handler.. Just mark it up-to-date and
136 * unlock the buffer. This is what ll_rw_block uses too.
138 void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
140 __end_buffer_read_notouch(bh, uptodate);
141 put_bh(bh);
143 EXPORT_SYMBOL(end_buffer_read_sync);
145 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
147 char b[BDEVNAME_SIZE];
149 if (uptodate) {
150 set_buffer_uptodate(bh);
151 } else {
152 if (!quiet_error(bh)) {
153 buffer_io_error(bh);
154 printk(KERN_WARNING "lost page write due to "
155 "I/O error on %s\n",
156 bdevname(bh->b_bdev, b));
158 set_buffer_write_io_error(bh);
159 clear_buffer_uptodate(bh);
161 unlock_buffer(bh);
162 put_bh(bh);
164 EXPORT_SYMBOL(end_buffer_write_sync);
167 * Various filesystems appear to want __find_get_block to be non-blocking.
168 * But it's the page lock which protects the buffers. To get around this,
169 * we get exclusion from try_to_free_buffers with the blockdev mapping's
170 * private_lock.
172 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
173 * may be quite high. This code could TryLock the page, and if that
174 * succeeds, there is no need to take private_lock. (But if
175 * private_lock is contended then so is mapping->tree_lock).
177 static struct buffer_head *
178 __find_get_block_slow(struct block_device *bdev, sector_t block)
180 struct inode *bd_inode = bdev->bd_inode;
181 struct address_space *bd_mapping = bd_inode->i_mapping;
182 struct buffer_head *ret = NULL;
183 pgoff_t index;
184 struct buffer_head *bh;
185 struct buffer_head *head;
186 struct page *page;
187 int all_mapped = 1;
189 index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
190 page = find_get_page(bd_mapping, index);
191 if (!page)
192 goto out;
194 spin_lock(&bd_mapping->private_lock);
195 if (!page_has_buffers(page))
196 goto out_unlock;
197 head = page_buffers(page);
198 bh = head;
199 do {
200 if (!buffer_mapped(bh))
201 all_mapped = 0;
202 else if (bh->b_blocknr == block) {
203 ret = bh;
204 get_bh(bh);
205 goto out_unlock;
207 bh = bh->b_this_page;
208 } while (bh != head);
210 /* we might be here because some of the buffers on this page are
211 * not mapped. This is due to various races between
212 * file io on the block device and getblk. It gets dealt with
213 * elsewhere, don't buffer_error if we had some unmapped buffers
215 if (all_mapped) {
216 printk("__find_get_block_slow() failed. "
217 "block=%llu, b_blocknr=%llu\n",
218 (unsigned long long)block,
219 (unsigned long long)bh->b_blocknr);
220 printk("b_state=0x%08lx, b_size=%zu\n",
221 bh->b_state, bh->b_size);
222 printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
224 out_unlock:
225 spin_unlock(&bd_mapping->private_lock);
226 page_cache_release(page);
227 out:
228 return ret;
231 /* If invalidate_buffers() will trash dirty buffers, it means some kind
232 of fs corruption is going on. Trashing dirty data always imply losing
233 information that was supposed to be just stored on the physical layer
234 by the user.
236 Thus invalidate_buffers in general usage is not allwowed to trash
237 dirty buffers. For example ioctl(FLSBLKBUF) expects dirty data to
238 be preserved. These buffers are simply skipped.
240 We also skip buffers which are still in use. For example this can
241 happen if a userspace program is reading the block device.
243 NOTE: In the case where the user removed a removable-media-disk even if
244 there's still dirty data not synced on disk (due a bug in the device driver
245 or due an error of the user), by not destroying the dirty buffers we could
246 generate corruption also on the next media inserted, thus a parameter is
247 necessary to handle this case in the most safe way possible (trying
248 to not corrupt also the new disk inserted with the data belonging to
249 the old now corrupted disk). Also for the ramdisk the natural thing
250 to do in order to release the ramdisk memory is to destroy dirty buffers.
252 These are two special cases. Normal usage imply the device driver
253 to issue a sync on the device (without waiting I/O completion) and
254 then an invalidate_buffers call that doesn't trash dirty buffers.
256 For handling cache coherency with the blkdev pagecache the 'update' case
257 is been introduced. It is needed to re-read from disk any pinned
258 buffer. NOTE: re-reading from disk is destructive so we can do it only
259 when we assume nobody is changing the buffercache under our I/O and when
260 we think the disk contains more recent information than the buffercache.
261 The update == 1 pass marks the buffers we need to update, the update == 2
262 pass does the actual I/O. */
263 void invalidate_bdev(struct block_device *bdev)
265 struct address_space *mapping = bdev->bd_inode->i_mapping;
267 if (mapping->nrpages == 0)
268 return;
270 invalidate_bh_lrus();
271 lru_add_drain_all(); /* make sure all lru add caches are flushed */
272 invalidate_mapping_pages(mapping, 0, -1);
273 /* 99% of the time, we don't need to flush the cleancache on the bdev.
274 * But, for the strange corners, lets be cautious
276 cleancache_flush_inode(mapping);
278 EXPORT_SYMBOL(invalidate_bdev);
281 * Kick the writeback threads then try to free up some ZONE_NORMAL memory.
283 static void free_more_memory(void)
285 struct zone *zone;
286 int nid;
288 wakeup_flusher_threads(1024);
289 yield();
291 for_each_online_node(nid) {
292 (void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
293 gfp_zone(GFP_NOFS), NULL,
294 &zone);
295 if (zone)
296 try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
297 GFP_NOFS, NULL);
302 * I/O completion handler for block_read_full_page() - pages
303 * which come unlocked at the end of I/O.
305 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
307 unsigned long flags;
308 struct buffer_head *first;
309 struct buffer_head *tmp;
310 struct page *page;
311 int page_uptodate = 1;
313 BUG_ON(!buffer_async_read(bh));
315 page = bh->b_page;
316 if (uptodate) {
317 set_buffer_uptodate(bh);
318 } else {
319 clear_buffer_uptodate(bh);
320 if (!quiet_error(bh))
321 buffer_io_error(bh);
322 SetPageError(page);
326 * Be _very_ careful from here on. Bad things can happen if
327 * two buffer heads end IO at almost the same time and both
328 * decide that the page is now completely done.
330 first = page_buffers(page);
331 local_irq_save(flags);
332 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
333 clear_buffer_async_read(bh);
334 unlock_buffer(bh);
335 tmp = bh;
336 do {
337 if (!buffer_uptodate(tmp))
338 page_uptodate = 0;
339 if (buffer_async_read(tmp)) {
340 BUG_ON(!buffer_locked(tmp));
341 goto still_busy;
343 tmp = tmp->b_this_page;
344 } while (tmp != bh);
345 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
346 local_irq_restore(flags);
349 * If none of the buffers had errors and they are all
350 * uptodate then we can set the page uptodate.
352 if (page_uptodate && !PageError(page))
353 SetPageUptodate(page);
354 unlock_page(page);
355 return;
357 still_busy:
358 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
359 local_irq_restore(flags);
360 return;
364 * Completion handler for block_write_full_page() - pages which are unlocked
365 * during I/O, and which have PageWriteback cleared upon I/O completion.
367 void end_buffer_async_write(struct buffer_head *bh, int uptodate)
369 char b[BDEVNAME_SIZE];
370 unsigned long flags;
371 struct buffer_head *first;
372 struct buffer_head *tmp;
373 struct page *page;
375 BUG_ON(!buffer_async_write(bh));
377 page = bh->b_page;
378 if (uptodate) {
379 set_buffer_uptodate(bh);
380 } else {
381 if (!quiet_error(bh)) {
382 buffer_io_error(bh);
383 printk(KERN_WARNING "lost page write due to "
384 "I/O error on %s\n",
385 bdevname(bh->b_bdev, b));
387 set_bit(AS_EIO, &page->mapping->flags);
388 set_buffer_write_io_error(bh);
389 clear_buffer_uptodate(bh);
390 SetPageError(page);
393 first = page_buffers(page);
394 local_irq_save(flags);
395 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
397 clear_buffer_async_write(bh);
398 unlock_buffer(bh);
399 tmp = bh->b_this_page;
400 while (tmp != bh) {
401 if (buffer_async_write(tmp)) {
402 BUG_ON(!buffer_locked(tmp));
403 goto still_busy;
405 tmp = tmp->b_this_page;
407 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
408 local_irq_restore(flags);
409 end_page_writeback(page);
410 return;
412 still_busy:
413 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
414 local_irq_restore(flags);
415 return;
417 EXPORT_SYMBOL(end_buffer_async_write);
420 * If a page's buffers are under async readin (end_buffer_async_read
421 * completion) then there is a possibility that another thread of
422 * control could lock one of the buffers after it has completed
423 * but while some of the other buffers have not completed. This
424 * locked buffer would confuse end_buffer_async_read() into not unlocking
425 * the page. So the absence of BH_Async_Read tells end_buffer_async_read()
426 * that this buffer is not under async I/O.
428 * The page comes unlocked when it has no locked buffer_async buffers
429 * left.
431 * PageLocked prevents anyone starting new async I/O reads any of
432 * the buffers.
434 * PageWriteback is used to prevent simultaneous writeout of the same
435 * page.
437 * PageLocked prevents anyone from starting writeback of a page which is
438 * under read I/O (PageWriteback is only ever set against a locked page).
440 static void mark_buffer_async_read(struct buffer_head *bh)
442 bh->b_end_io = end_buffer_async_read;
443 set_buffer_async_read(bh);
446 static void mark_buffer_async_write_endio(struct buffer_head *bh,
447 bh_end_io_t *handler)
449 bh->b_end_io = handler;
450 set_buffer_async_write(bh);
453 void mark_buffer_async_write(struct buffer_head *bh)
455 mark_buffer_async_write_endio(bh, end_buffer_async_write);
457 EXPORT_SYMBOL(mark_buffer_async_write);
461 * fs/buffer.c contains helper functions for buffer-backed address space's
462 * fsync functions. A common requirement for buffer-based filesystems is
463 * that certain data from the backing blockdev needs to be written out for
464 * a successful fsync(). For example, ext2 indirect blocks need to be
465 * written back and waited upon before fsync() returns.
467 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
468 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
469 * management of a list of dependent buffers at ->i_mapping->private_list.
471 * Locking is a little subtle: try_to_free_buffers() will remove buffers
472 * from their controlling inode's queue when they are being freed. But
473 * try_to_free_buffers() will be operating against the *blockdev* mapping
474 * at the time, not against the S_ISREG file which depends on those buffers.
475 * So the locking for private_list is via the private_lock in the address_space
476 * which backs the buffers. Which is different from the address_space
477 * against which the buffers are listed. So for a particular address_space,
478 * mapping->private_lock does *not* protect mapping->private_list! In fact,
479 * mapping->private_list will always be protected by the backing blockdev's
480 * ->private_lock.
482 * Which introduces a requirement: all buffers on an address_space's
483 * ->private_list must be from the same address_space: the blockdev's.
485 * address_spaces which do not place buffers at ->private_list via these
486 * utility functions are free to use private_lock and private_list for
487 * whatever they want. The only requirement is that list_empty(private_list)
488 * be true at clear_inode() time.
490 * FIXME: clear_inode should not call invalidate_inode_buffers(). The
491 * filesystems should do that. invalidate_inode_buffers() should just go
492 * BUG_ON(!list_empty).
494 * FIXME: mark_buffer_dirty_inode() is a data-plane operation. It should
495 * take an address_space, not an inode. And it should be called
496 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
497 * queued up.
499 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
500 * list if it is already on a list. Because if the buffer is on a list,
501 * it *must* already be on the right one. If not, the filesystem is being
502 * silly. This will save a ton of locking. But first we have to ensure
503 * that buffers are taken *off* the old inode's list when they are freed
504 * (presumably in truncate). That requires careful auditing of all
505 * filesystems (do it inside bforget()). It could also be done by bringing
506 * b_inode back.
510 * The buffer's backing address_space's private_lock must be held
512 static void __remove_assoc_queue(struct buffer_head *bh)
514 list_del_init(&bh->b_assoc_buffers);
515 WARN_ON(!bh->b_assoc_map);
516 if (buffer_write_io_error(bh))
517 set_bit(AS_EIO, &bh->b_assoc_map->flags);
518 bh->b_assoc_map = NULL;
521 int inode_has_buffers(struct inode *inode)
523 return !list_empty(&inode->i_data.private_list);
527 * osync is designed to support O_SYNC io. It waits synchronously for
528 * all already-submitted IO to complete, but does not queue any new
529 * writes to the disk.
531 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
532 * you dirty the buffers, and then use osync_inode_buffers to wait for
533 * completion. Any other dirty buffers which are not yet queued for
534 * write will not be flushed to disk by the osync.
536 static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
538 struct buffer_head *bh;
539 struct list_head *p;
540 int err = 0;
542 spin_lock(lock);
543 repeat:
544 list_for_each_prev(p, list) {
545 bh = BH_ENTRY(p);
546 if (buffer_locked(bh)) {
547 get_bh(bh);
548 spin_unlock(lock);
549 wait_on_buffer(bh);
550 if (!buffer_uptodate(bh))
551 err = -EIO;
552 brelse(bh);
553 spin_lock(lock);
554 goto repeat;
557 spin_unlock(lock);
558 return err;
561 static void do_thaw_one(struct super_block *sb, void *unused)
563 char b[BDEVNAME_SIZE];
564 while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
565 printk(KERN_WARNING "Emergency Thaw on %s\n",
566 bdevname(sb->s_bdev, b));
569 static void do_thaw_all(struct work_struct *work)
571 iterate_supers(do_thaw_one, NULL);
572 kfree(work);
573 printk(KERN_WARNING "Emergency Thaw complete\n");
577 * emergency_thaw_all -- forcibly thaw every frozen filesystem
579 * Used for emergency unfreeze of all filesystems via SysRq
581 void emergency_thaw_all(void)
583 struct work_struct *work;
585 work = kmalloc(sizeof(*work), GFP_ATOMIC);
586 if (work) {
587 INIT_WORK(work, do_thaw_all);
588 schedule_work(work);
593 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
594 * @mapping: the mapping which wants those buffers written
596 * Starts I/O against the buffers at mapping->private_list, and waits upon
597 * that I/O.
599 * Basically, this is a convenience function for fsync().
600 * @mapping is a file or directory which needs those buffers to be written for
601 * a successful fsync().
603 int sync_mapping_buffers(struct address_space *mapping)
605 struct address_space *buffer_mapping = mapping->assoc_mapping;
607 if (buffer_mapping == NULL || list_empty(&mapping->private_list))
608 return 0;
610 return fsync_buffers_list(&buffer_mapping->private_lock,
611 &mapping->private_list);
613 EXPORT_SYMBOL(sync_mapping_buffers);
616 * Called when we've recently written block `bblock', and it is known that
617 * `bblock' was for a buffer_boundary() buffer. This means that the block at
618 * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's
619 * dirty, schedule it for IO. So that indirects merge nicely with their data.
621 void write_boundary_block(struct block_device *bdev,
622 sector_t bblock, unsigned blocksize)
624 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
625 if (bh) {
626 if (buffer_dirty(bh))
627 ll_rw_block(WRITE, 1, &bh);
628 put_bh(bh);
632 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
634 struct address_space *mapping = inode->i_mapping;
635 struct address_space *buffer_mapping = bh->b_page->mapping;
637 mark_buffer_dirty(bh);
638 if (!mapping->assoc_mapping) {
639 mapping->assoc_mapping = buffer_mapping;
640 } else {
641 BUG_ON(mapping->assoc_mapping != buffer_mapping);
643 if (!bh->b_assoc_map) {
644 spin_lock(&buffer_mapping->private_lock);
645 list_move_tail(&bh->b_assoc_buffers,
646 &mapping->private_list);
647 bh->b_assoc_map = mapping;
648 spin_unlock(&buffer_mapping->private_lock);
651 EXPORT_SYMBOL(mark_buffer_dirty_inode);
654 * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
655 * dirty.
657 * If warn is true, then emit a warning if the page is not uptodate and has
658 * not been truncated.
660 static void __set_page_dirty(struct page *page,
661 struct address_space *mapping, int warn)
663 spin_lock_irq(&mapping->tree_lock);
664 if (page->mapping) { /* Race with truncate? */
665 WARN_ON_ONCE(warn && !PageUptodate(page));
666 account_page_dirtied(page, mapping);
667 radix_tree_tag_set(&mapping->page_tree,
668 page_index(page), PAGECACHE_TAG_DIRTY);
670 spin_unlock_irq(&mapping->tree_lock);
671 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
675 * Add a page to the dirty page list.
677 * It is a sad fact of life that this function is called from several places
678 * deeply under spinlocking. It may not sleep.
680 * If the page has buffers, the uptodate buffers are set dirty, to preserve
681 * dirty-state coherency between the page and the buffers. It the page does
682 * not have buffers then when they are later attached they will all be set
683 * dirty.
685 * The buffers are dirtied before the page is dirtied. There's a small race
686 * window in which a writepage caller may see the page cleanness but not the
687 * buffer dirtiness. That's fine. If this code were to set the page dirty
688 * before the buffers, a concurrent writepage caller could clear the page dirty
689 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
690 * page on the dirty page list.
692 * We use private_lock to lock against try_to_free_buffers while using the
693 * page's buffer list. Also use this to protect against clean buffers being
694 * added to the page after it was set dirty.
696 * FIXME: may need to call ->reservepage here as well. That's rather up to the
697 * address_space though.
699 int __set_page_dirty_buffers(struct page *page)
701 int newly_dirty;
702 struct address_space *mapping = page_mapping(page);
704 if (unlikely(!mapping))
705 return !TestSetPageDirty(page);
707 spin_lock(&mapping->private_lock);
708 if (page_has_buffers(page)) {
709 struct buffer_head *head = page_buffers(page);
710 struct buffer_head *bh = head;
712 do {
713 set_buffer_dirty(bh);
714 bh = bh->b_this_page;
715 } while (bh != head);
717 newly_dirty = !TestSetPageDirty(page);
718 spin_unlock(&mapping->private_lock);
720 if (newly_dirty)
721 __set_page_dirty(page, mapping, 1);
722 return newly_dirty;
724 EXPORT_SYMBOL(__set_page_dirty_buffers);
727 * Write out and wait upon a list of buffers.
729 * We have conflicting pressures: we want to make sure that all
730 * initially dirty buffers get waited on, but that any subsequently
731 * dirtied buffers don't. After all, we don't want fsync to last
732 * forever if somebody is actively writing to the file.
734 * Do this in two main stages: first we copy dirty buffers to a
735 * temporary inode list, queueing the writes as we go. Then we clean
736 * up, waiting for those writes to complete.
738 * During this second stage, any subsequent updates to the file may end
739 * up refiling the buffer on the original inode's dirty list again, so
740 * there is a chance we will end up with a buffer queued for write but
741 * not yet completed on that list. So, as a final cleanup we go through
742 * the osync code to catch these locked, dirty buffers without requeuing
743 * any newly dirty buffers for write.
745 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
747 struct buffer_head *bh;
748 struct list_head tmp;
749 struct address_space *mapping;
750 int err = 0, err2;
751 struct blk_plug plug;
753 INIT_LIST_HEAD(&tmp);
754 blk_start_plug(&plug);
756 spin_lock(lock);
757 while (!list_empty(list)) {
758 bh = BH_ENTRY(list->next);
759 mapping = bh->b_assoc_map;
760 __remove_assoc_queue(bh);
761 /* Avoid race with mark_buffer_dirty_inode() which does
762 * a lockless check and we rely on seeing the dirty bit */
763 smp_mb();
764 if (buffer_dirty(bh) || buffer_locked(bh)) {
765 list_add(&bh->b_assoc_buffers, &tmp);
766 bh->b_assoc_map = mapping;
767 if (buffer_dirty(bh)) {
768 get_bh(bh);
769 spin_unlock(lock);
771 * Ensure any pending I/O completes so that
772 * write_dirty_buffer() actually writes the
773 * current contents - it is a noop if I/O is
774 * still in flight on potentially older
775 * contents.
777 write_dirty_buffer(bh, WRITE_SYNC);
780 * Kick off IO for the previous mapping. Note
781 * that we will not run the very last mapping,
782 * wait_on_buffer() will do that for us
783 * through sync_buffer().
785 brelse(bh);
786 spin_lock(lock);
791 spin_unlock(lock);
792 blk_finish_plug(&plug);
793 spin_lock(lock);
795 while (!list_empty(&tmp)) {
796 bh = BH_ENTRY(tmp.prev);
797 get_bh(bh);
798 mapping = bh->b_assoc_map;
799 __remove_assoc_queue(bh);
800 /* Avoid race with mark_buffer_dirty_inode() which does
801 * a lockless check and we rely on seeing the dirty bit */
802 smp_mb();
803 if (buffer_dirty(bh)) {
804 list_add(&bh->b_assoc_buffers,
805 &mapping->private_list);
806 bh->b_assoc_map = mapping;
808 spin_unlock(lock);
809 wait_on_buffer(bh);
810 if (!buffer_uptodate(bh))
811 err = -EIO;
812 brelse(bh);
813 spin_lock(lock);
816 spin_unlock(lock);
817 err2 = osync_buffers_list(lock, list);
818 if (err)
819 return err;
820 else
821 return err2;
825 * Invalidate any and all dirty buffers on a given inode. We are
826 * probably unmounting the fs, but that doesn't mean we have already
827 * done a sync(). Just drop the buffers from the inode list.
829 * NOTE: we take the inode's blockdev's mapping's private_lock. Which
830 * assumes that all the buffers are against the blockdev. Not true
831 * for reiserfs.
833 void invalidate_inode_buffers(struct inode *inode)
835 if (inode_has_buffers(inode)) {
836 struct address_space *mapping = &inode->i_data;
837 struct list_head *list = &mapping->private_list;
838 struct address_space *buffer_mapping = mapping->assoc_mapping;
840 spin_lock(&buffer_mapping->private_lock);
841 while (!list_empty(list))
842 __remove_assoc_queue(BH_ENTRY(list->next));
843 spin_unlock(&buffer_mapping->private_lock);
846 EXPORT_SYMBOL(invalidate_inode_buffers);
849 * Remove any clean buffers from the inode's buffer list. This is called
850 * when we're trying to free the inode itself. Those buffers can pin it.
852 * Returns true if all buffers were removed.
854 int remove_inode_buffers(struct inode *inode)
856 int ret = 1;
858 if (inode_has_buffers(inode)) {
859 struct address_space *mapping = &inode->i_data;
860 struct list_head *list = &mapping->private_list;
861 struct address_space *buffer_mapping = mapping->assoc_mapping;
863 spin_lock(&buffer_mapping->private_lock);
864 while (!list_empty(list)) {
865 struct buffer_head *bh = BH_ENTRY(list->next);
866 if (buffer_dirty(bh)) {
867 ret = 0;
868 break;
870 __remove_assoc_queue(bh);
872 spin_unlock(&buffer_mapping->private_lock);
874 return ret;
878 * Create the appropriate buffers when given a page for data area and
879 * the size of each buffer.. Use the bh->b_this_page linked list to
880 * follow the buffers created. Return NULL if unable to create more
881 * buffers.
883 * The retry flag is used to differentiate async IO (paging, swapping)
884 * which may not fail from ordinary buffer allocations.
886 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
887 int retry)
889 struct buffer_head *bh, *head;
890 long offset;
892 try_again:
893 head = NULL;
894 offset = PAGE_SIZE;
895 while ((offset -= size) >= 0) {
896 bh = alloc_buffer_head(GFP_NOFS);
897 if (!bh)
898 goto no_grow;
900 bh->b_bdev = NULL;
901 bh->b_this_page = head;
902 bh->b_blocknr = -1;
903 head = bh;
905 bh->b_state = 0;
906 atomic_set(&bh->b_count, 0);
907 bh->b_size = size;
909 /* Link the buffer to its page */
910 set_bh_page(bh, page, offset);
912 init_buffer(bh, NULL, NULL);
914 return head;
916 * In case anything failed, we just free everything we got.
918 no_grow:
919 if (head) {
920 do {
921 bh = head;
922 head = head->b_this_page;
923 free_buffer_head(bh);
924 } while (head);
928 * Return failure for non-async IO requests. Async IO requests
929 * are not allowed to fail, so we have to wait until buffer heads
930 * become available. But we don't want tasks sleeping with
931 * partially complete buffers, so all were released above.
933 if (!retry)
934 return NULL;
936 /* We're _really_ low on memory. Now we just
937 * wait for old buffer heads to become free due to
938 * finishing IO. Since this is an async request and
939 * the reserve list is empty, we're sure there are
940 * async buffer heads in use.
942 free_more_memory();
943 goto try_again;
945 EXPORT_SYMBOL_GPL(alloc_page_buffers);
947 static inline void
948 link_dev_buffers(struct page *page, struct buffer_head *head)
950 struct buffer_head *bh, *tail;
952 bh = head;
953 do {
954 tail = bh;
955 bh = bh->b_this_page;
956 } while (bh);
957 tail->b_this_page = head;
958 attach_page_buffers(page, head);
962 * Initialise the state of a blockdev page's buffers.
964 static void
965 init_page_buffers(struct page *page, struct block_device *bdev,
966 sector_t block, int size)
968 struct buffer_head *head = page_buffers(page);
969 struct buffer_head *bh = head;
970 int uptodate = PageUptodate(page);
972 do {
973 if (!buffer_mapped(bh)) {
974 init_buffer(bh, NULL, NULL);
975 bh->b_bdev = bdev;
976 bh->b_blocknr = block;
977 if (uptodate)
978 set_buffer_uptodate(bh);
979 set_buffer_mapped(bh);
981 block++;
982 bh = bh->b_this_page;
983 } while (bh != head);
987 * Create the page-cache page that contains the requested block.
989 * This is user purely for blockdev mappings.
991 static struct page *
992 grow_dev_page(struct block_device *bdev, sector_t block,
993 pgoff_t index, int size)
995 struct inode *inode = bdev->bd_inode;
996 struct page *page;
997 struct buffer_head *bh;
999 page = find_or_create_page(inode->i_mapping, index,
1000 (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
1001 if (!page)
1002 return NULL;
1004 BUG_ON(!PageLocked(page));
1006 if (page_has_buffers(page)) {
1007 bh = page_buffers(page);
1008 if (bh->b_size == size) {
1009 init_page_buffers(page, bdev, block, size);
1010 return page;
1012 if (!try_to_free_buffers(page))
1013 goto failed;
1017 * Allocate some buffers for this page
1019 bh = alloc_page_buffers(page, size, 0);
1020 if (!bh)
1021 goto failed;
1024 * Link the page to the buffers and initialise them. Take the
1025 * lock to be atomic wrt __find_get_block(), which does not
1026 * run under the page lock.
1028 spin_lock(&inode->i_mapping->private_lock);
1029 link_dev_buffers(page, bh);
1030 init_page_buffers(page, bdev, block, size);
1031 spin_unlock(&inode->i_mapping->private_lock);
1032 return page;
1034 failed:
1035 BUG();
1036 unlock_page(page);
1037 page_cache_release(page);
1038 return NULL;
1042 * Create buffers for the specified block device block's page. If
1043 * that page was dirty, the buffers are set dirty also.
1045 static int
1046 grow_buffers(struct block_device *bdev, sector_t block, int size)
1048 struct page *page;
1049 pgoff_t index;
1050 int sizebits;
1052 sizebits = -1;
1053 do {
1054 sizebits++;
1055 } while ((size << sizebits) < PAGE_SIZE);
1057 index = block >> sizebits;
1060 * Check for a block which wants to lie outside our maximum possible
1061 * pagecache index. (this comparison is done using sector_t types).
1063 if (unlikely(index != block >> sizebits)) {
1064 char b[BDEVNAME_SIZE];
1066 printk(KERN_ERR "%s: requested out-of-range block %llu for "
1067 "device %s\n",
1068 __func__, (unsigned long long)block,
1069 bdevname(bdev, b));
1070 return -EIO;
1072 block = index << sizebits;
1073 /* Create a page with the proper size buffers.. */
1074 page = grow_dev_page(bdev, block, index, size);
1075 if (!page)
1076 return 0;
1077 unlock_page(page);
1078 page_cache_release(page);
1079 return 1;
1082 static struct buffer_head *
1083 __getblk_slow(struct block_device *bdev, sector_t block, int size)
1085 /* Size must be multiple of hard sectorsize */
1086 if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
1087 (size < 512 || size > PAGE_SIZE))) {
1088 printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1089 size);
1090 printk(KERN_ERR "logical block size: %d\n",
1091 bdev_logical_block_size(bdev));
1093 dump_stack();
1094 return NULL;
1097 for (;;) {
1098 struct buffer_head * bh;
1099 int ret;
1101 bh = __find_get_block(bdev, block, size);
1102 if (bh)
1103 return bh;
1105 ret = grow_buffers(bdev, block, size);
1106 if (ret < 0)
1107 return NULL;
1108 if (ret == 0)
1109 free_more_memory();
1114 * The relationship between dirty buffers and dirty pages:
1116 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1117 * the page is tagged dirty in its radix tree.
1119 * At all times, the dirtiness of the buffers represents the dirtiness of
1120 * subsections of the page. If the page has buffers, the page dirty bit is
1121 * merely a hint about the true dirty state.
1123 * When a page is set dirty in its entirety, all its buffers are marked dirty
1124 * (if the page has buffers).
1126 * When a buffer is marked dirty, its page is dirtied, but the page's other
1127 * buffers are not.
1129 * Also. When blockdev buffers are explicitly read with bread(), they
1130 * individually become uptodate. But their backing page remains not
1131 * uptodate - even if all of its buffers are uptodate. A subsequent
1132 * block_read_full_page() against that page will discover all the uptodate
1133 * buffers, will set the page uptodate and will perform no I/O.
1137 * mark_buffer_dirty - mark a buffer_head as needing writeout
1138 * @bh: the buffer_head to mark dirty
1140 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1141 * backing page dirty, then tag the page as dirty in its address_space's radix
1142 * tree and then attach the address_space's inode to its superblock's dirty
1143 * inode list.
1145 * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
1146 * mapping->tree_lock and mapping->host->i_lock.
1148 void mark_buffer_dirty(struct buffer_head *bh)
1150 WARN_ON_ONCE(!buffer_uptodate(bh));
1153 * Very *carefully* optimize the it-is-already-dirty case.
1155 * Don't let the final "is it dirty" escape to before we
1156 * perhaps modified the buffer.
1158 if (buffer_dirty(bh)) {
1159 smp_mb();
1160 if (buffer_dirty(bh))
1161 return;
1164 if (!test_set_buffer_dirty(bh)) {
1165 struct page *page = bh->b_page;
1166 if (!TestSetPageDirty(page)) {
1167 struct address_space *mapping = page_mapping(page);
1168 if (mapping)
1169 __set_page_dirty(page, mapping, 0);
1173 EXPORT_SYMBOL(mark_buffer_dirty);
1176 * Decrement a buffer_head's reference count. If all buffers against a page
1177 * have zero reference count, are clean and unlocked, and if the page is clean
1178 * and unlocked then try_to_free_buffers() may strip the buffers from the page
1179 * in preparation for freeing it (sometimes, rarely, buffers are removed from
1180 * a page but it ends up not being freed, and buffers may later be reattached).
1182 void __brelse(struct buffer_head * buf)
1184 if (atomic_read(&buf->b_count)) {
1185 put_bh(buf);
1186 return;
1188 WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1190 EXPORT_SYMBOL(__brelse);
1193 * bforget() is like brelse(), except it discards any
1194 * potentially dirty data.
1196 void __bforget(struct buffer_head *bh)
1198 clear_buffer_dirty(bh);
1199 if (bh->b_assoc_map) {
1200 struct address_space *buffer_mapping = bh->b_page->mapping;
1202 spin_lock(&buffer_mapping->private_lock);
1203 list_del_init(&bh->b_assoc_buffers);
1204 bh->b_assoc_map = NULL;
1205 spin_unlock(&buffer_mapping->private_lock);
1207 __brelse(bh);
1209 EXPORT_SYMBOL(__bforget);
1211 static struct buffer_head *__bread_slow(struct buffer_head *bh)
1213 lock_buffer(bh);
1214 if (buffer_uptodate(bh)) {
1215 unlock_buffer(bh);
1216 return bh;
1217 } else {
1218 get_bh(bh);
1219 bh->b_end_io = end_buffer_read_sync;
1220 submit_bh(READ, bh);
1221 wait_on_buffer(bh);
1222 if (buffer_uptodate(bh))
1223 return bh;
1225 brelse(bh);
1226 return NULL;
1230 * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block().
1231 * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their
1232 * refcount elevated by one when they're in an LRU. A buffer can only appear
1233 * once in a particular CPU's LRU. A single buffer can be present in multiple
1234 * CPU's LRUs at the same time.
1236 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1237 * sb_find_get_block().
1239 * The LRUs themselves only need locking against invalidate_bh_lrus. We use
1240 * a local interrupt disable for that.
1243 #define BH_LRU_SIZE 8
1245 struct bh_lru {
1246 struct buffer_head *bhs[BH_LRU_SIZE];
1249 static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1251 #ifdef CONFIG_SMP
1252 #define bh_lru_lock() local_irq_disable()
1253 #define bh_lru_unlock() local_irq_enable()
1254 #else
1255 #define bh_lru_lock() preempt_disable()
1256 #define bh_lru_unlock() preempt_enable()
1257 #endif
1259 static inline void check_irqs_on(void)
1261 #ifdef irqs_disabled
1262 BUG_ON(irqs_disabled());
1263 #endif
1267 * The LRU management algorithm is dopey-but-simple. Sorry.
1269 static void bh_lru_install(struct buffer_head *bh)
1271 struct buffer_head *evictee = NULL;
1273 check_irqs_on();
1274 bh_lru_lock();
1275 if (__this_cpu_read(bh_lrus.bhs[0]) != bh) {
1276 struct buffer_head *bhs[BH_LRU_SIZE];
1277 int in;
1278 int out = 0;
1280 get_bh(bh);
1281 bhs[out++] = bh;
1282 for (in = 0; in < BH_LRU_SIZE; in++) {
1283 struct buffer_head *bh2 =
1284 __this_cpu_read(bh_lrus.bhs[in]);
1286 if (bh2 == bh) {
1287 __brelse(bh2);
1288 } else {
1289 if (out >= BH_LRU_SIZE) {
1290 BUG_ON(evictee != NULL);
1291 evictee = bh2;
1292 } else {
1293 bhs[out++] = bh2;
1297 while (out < BH_LRU_SIZE)
1298 bhs[out++] = NULL;
1299 memcpy(__this_cpu_ptr(&bh_lrus.bhs), bhs, sizeof(bhs));
1301 bh_lru_unlock();
1303 if (evictee)
1304 __brelse(evictee);
1308 * Look up the bh in this cpu's LRU. If it's there, move it to the head.
1310 static struct buffer_head *
1311 lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
1313 struct buffer_head *ret = NULL;
1314 unsigned int i;
1316 check_irqs_on();
1317 bh_lru_lock();
1318 for (i = 0; i < BH_LRU_SIZE; i++) {
1319 struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);
1321 if (bh && bh->b_bdev == bdev &&
1322 bh->b_blocknr == block && bh->b_size == size) {
1323 if (i) {
1324 while (i) {
1325 __this_cpu_write(bh_lrus.bhs[i],
1326 __this_cpu_read(bh_lrus.bhs[i - 1]));
1327 i--;
1329 __this_cpu_write(bh_lrus.bhs[0], bh);
1331 get_bh(bh);
1332 ret = bh;
1333 break;
1336 bh_lru_unlock();
1337 return ret;
1341 * Perform a pagecache lookup for the matching buffer. If it's there, refresh
1342 * it in the LRU and mark it as accessed. If it is not present then return
1343 * NULL
1345 struct buffer_head *
1346 __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
1348 struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1350 if (bh == NULL) {
1351 bh = __find_get_block_slow(bdev, block);
1352 if (bh)
1353 bh_lru_install(bh);
1355 if (bh)
1356 touch_buffer(bh);
1357 return bh;
1359 EXPORT_SYMBOL(__find_get_block);
1362 * __getblk will locate (and, if necessary, create) the buffer_head
1363 * which corresponds to the passed block_device, block and size. The
1364 * returned buffer has its reference count incremented.
1366 * __getblk() cannot fail - it just keeps trying. If you pass it an
1367 * illegal block number, __getblk() will happily return a buffer_head
1368 * which represents the non-existent block. Very weird.
1370 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1371 * attempt is failing. FIXME, perhaps?
1373 struct buffer_head *
1374 __getblk(struct block_device *bdev, sector_t block, unsigned size)
1376 struct buffer_head *bh = __find_get_block(bdev, block, size);
1378 might_sleep();
1379 if (bh == NULL)
1380 bh = __getblk_slow(bdev, block, size);
1381 return bh;
1383 EXPORT_SYMBOL(__getblk);
1386 * Do async read-ahead on a buffer..
1388 void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
1390 struct buffer_head *bh = __getblk(bdev, block, size);
1391 if (likely(bh)) {
1392 ll_rw_block(READA, 1, &bh);
1393 brelse(bh);
1396 EXPORT_SYMBOL(__breadahead);
1399 * __bread() - reads a specified block and returns the bh
1400 * @bdev: the block_device to read from
1401 * @block: number of block
1402 * @size: size (in bytes) to read
1404 * Reads a specified block, and returns buffer head that contains it.
1405 * It returns NULL if the block was unreadable.
1407 struct buffer_head *
1408 __bread(struct block_device *bdev, sector_t block, unsigned size)
1410 struct buffer_head *bh = __getblk(bdev, block, size);
1412 if (likely(bh) && !buffer_uptodate(bh))
1413 bh = __bread_slow(bh);
1414 return bh;
1416 EXPORT_SYMBOL(__bread);
1419 * invalidate_bh_lrus() is called rarely - but not only at unmount.
1420 * This doesn't race because it runs in each cpu either in irq
1421 * or with preempt disabled.
1423 static void invalidate_bh_lru(void *arg)
1425 struct bh_lru *b = &get_cpu_var(bh_lrus);
1426 int i;
1428 for (i = 0; i < BH_LRU_SIZE; i++) {
1429 brelse(b->bhs[i]);
1430 b->bhs[i] = NULL;
1432 put_cpu_var(bh_lrus);
1435 void invalidate_bh_lrus(void)
1437 on_each_cpu(invalidate_bh_lru, NULL, 1);
1439 EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
1441 void set_bh_page(struct buffer_head *bh,
1442 struct page *page, unsigned long offset)
1444 bh->b_page = page;
1445 BUG_ON(offset >= PAGE_SIZE);
1446 if (PageHighMem(page))
1448 * This catches illegal uses and preserves the offset:
1450 bh->b_data = (char *)(0 + offset);
1451 else
1452 bh->b_data = page_address(page) + offset;
1454 EXPORT_SYMBOL(set_bh_page);
1457 * Called when truncating a buffer on a page completely.
1459 static void discard_buffer(struct buffer_head * bh)
1461 lock_buffer(bh);
1462 clear_buffer_dirty(bh);
1463 bh->b_bdev = NULL;
1464 clear_buffer_mapped(bh);
1465 clear_buffer_req(bh);
1466 clear_buffer_new(bh);
1467 clear_buffer_delay(bh);
1468 clear_buffer_unwritten(bh);
1469 unlock_buffer(bh);
1473 * block_invalidatepage - invalidate part of all of a buffer-backed page
1475 * @page: the page which is affected
1476 * @offset: the index of the truncation point
1478 * block_invalidatepage() is called when all or part of the page has become
1479 * invalidatedby a truncate operation.
1481 * block_invalidatepage() does not have to release all buffers, but it must
1482 * ensure that no dirty buffer is left outside @offset and that no I/O
1483 * is underway against any of the blocks which are outside the truncation
1484 * point. Because the caller is about to free (and possibly reuse) those
1485 * blocks on-disk.
1487 void block_invalidatepage(struct page *page, unsigned long offset)
1489 struct buffer_head *head, *bh, *next;
1490 unsigned int curr_off = 0;
1492 BUG_ON(!PageLocked(page));
1493 if (!page_has_buffers(page))
1494 goto out;
1496 head = page_buffers(page);
1497 bh = head;
1498 do {
1499 unsigned int next_off = curr_off + bh->b_size;
1500 next = bh->b_this_page;
1503 * is this block fully invalidated?
1505 if (offset <= curr_off)
1506 discard_buffer(bh);
1507 curr_off = next_off;
1508 bh = next;
1509 } while (bh != head);
1512 * We release buffers only if the entire page is being invalidated.
1513 * The get_block cached value has been unconditionally invalidated,
1514 * so real IO is not possible anymore.
1516 if (offset == 0)
1517 try_to_release_page(page, 0);
1518 out:
1519 return;
1521 EXPORT_SYMBOL(block_invalidatepage);
1524 * We attach and possibly dirty the buffers atomically wrt
1525 * __set_page_dirty_buffers() via private_lock. try_to_free_buffers
1526 * is already excluded via the page lock.
1528 void create_empty_buffers(struct page *page,
1529 unsigned long blocksize, unsigned long b_state)
1531 struct buffer_head *bh, *head, *tail;
1533 head = alloc_page_buffers(page, blocksize, 1);
1534 bh = head;
1535 do {
1536 bh->b_state |= b_state;
1537 tail = bh;
1538 bh = bh->b_this_page;
1539 } while (bh);
1540 tail->b_this_page = head;
1542 spin_lock(&page->mapping->private_lock);
1543 if (PageUptodate(page) || PageDirty(page)) {
1544 bh = head;
1545 do {
1546 if (PageDirty(page))
1547 set_buffer_dirty(bh);
1548 if (PageUptodate(page))
1549 set_buffer_uptodate(bh);
1550 bh = bh->b_this_page;
1551 } while (bh != head);
1553 attach_page_buffers(page, head);
1554 spin_unlock(&page->mapping->private_lock);
1556 EXPORT_SYMBOL(create_empty_buffers);
1559 * We are taking a block for data and we don't want any output from any
1560 * buffer-cache aliases starting from return from that function and
1561 * until the moment when something will explicitly mark the buffer
1562 * dirty (hopefully that will not happen until we will free that block ;-)
1563 * We don't even need to mark it not-uptodate - nobody can expect
1564 * anything from a newly allocated buffer anyway. We used to used
1565 * unmap_buffer() for such invalidation, but that was wrong. We definitely
1566 * don't want to mark the alias unmapped, for example - it would confuse
1567 * anyone who might pick it with bread() afterwards...
1569 * Also.. Note that bforget() doesn't lock the buffer. So there can
1570 * be writeout I/O going on against recently-freed buffers. We don't
1571 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1572 * only if we really need to. That happens here.
1574 void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1576 struct buffer_head *old_bh;
1578 might_sleep();
1580 old_bh = __find_get_block_slow(bdev, block);
1581 if (old_bh) {
1582 clear_buffer_dirty(old_bh);
1583 wait_on_buffer(old_bh);
1584 clear_buffer_req(old_bh);
1585 __brelse(old_bh);
1588 EXPORT_SYMBOL(unmap_underlying_metadata);
1591 * NOTE! All mapped/uptodate combinations are valid:
1593 * Mapped Uptodate Meaning
1595 * No No "unknown" - must do get_block()
1596 * No Yes "hole" - zero-filled
1597 * Yes No "allocated" - allocated on disk, not read in
1598 * Yes Yes "valid" - allocated and up-to-date in memory.
1600 * "Dirty" is valid only with the last case (mapped+uptodate).
1604 * While block_write_full_page is writing back the dirty buffers under
1605 * the page lock, whoever dirtied the buffers may decide to clean them
1606 * again at any time. We handle that by only looking at the buffer
1607 * state inside lock_buffer().
1609 * If block_write_full_page() is called for regular writeback
1610 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1611 * locked buffer. This only can happen if someone has written the buffer
1612 * directly, with submit_bh(). At the address_space level PageWriteback
1613 * prevents this contention from occurring.
1615 * If block_write_full_page() is called with wbc->sync_mode ==
1616 * WB_SYNC_ALL, the writes are posted using WRITE_SYNC; this
1617 * causes the writes to be flagged as synchronous writes.
1619 static int __block_write_full_page(struct inode *inode, struct page *page,
1620 get_block_t *get_block, struct writeback_control *wbc,
1621 bh_end_io_t *handler)
1623 int err;
1624 sector_t block;
1625 sector_t last_block;
1626 struct buffer_head *bh, *head;
1627 const unsigned blocksize = 1 << inode->i_blkbits;
1628 int nr_underway = 0;
1629 int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
1630 WRITE_SYNC : WRITE);
1632 BUG_ON(!PageLocked(page));
1634 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1636 if (!page_has_buffers(page)) {
1637 create_empty_buffers(page, blocksize,
1638 (1 << BH_Dirty)|(1 << BH_Uptodate));
1642 * Be very careful. We have no exclusion from __set_page_dirty_buffers
1643 * here, and the (potentially unmapped) buffers may become dirty at
1644 * any time. If a buffer becomes dirty here after we've inspected it
1645 * then we just miss that fact, and the page stays dirty.
1647 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1648 * handle that here by just cleaning them.
1651 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1652 head = page_buffers(page);
1653 bh = head;
1656 * Get all the dirty buffers mapped to disk addresses and
1657 * handle any aliases from the underlying blockdev's mapping.
1659 do {
1660 if (block > last_block) {
1662 * mapped buffers outside i_size will occur, because
1663 * this page can be outside i_size when there is a
1664 * truncate in progress.
1667 * The buffer was zeroed by block_write_full_page()
1669 clear_buffer_dirty(bh);
1670 set_buffer_uptodate(bh);
1671 } else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1672 buffer_dirty(bh)) {
1673 WARN_ON(bh->b_size != blocksize);
1674 err = get_block(inode, block, bh, 1);
1675 if (err)
1676 goto recover;
1677 clear_buffer_delay(bh);
1678 if (buffer_new(bh)) {
1679 /* blockdev mappings never come here */
1680 clear_buffer_new(bh);
1681 unmap_underlying_metadata(bh->b_bdev,
1682 bh->b_blocknr);
1685 bh = bh->b_this_page;
1686 block++;
1687 } while (bh != head);
1689 do {
1690 if (!buffer_mapped(bh))
1691 continue;
1693 * If it's a fully non-blocking write attempt and we cannot
1694 * lock the buffer then redirty the page. Note that this can
1695 * potentially cause a busy-wait loop from writeback threads
1696 * and kswapd activity, but those code paths have their own
1697 * higher-level throttling.
1699 if (wbc->sync_mode != WB_SYNC_NONE) {
1700 lock_buffer(bh);
1701 } else if (!trylock_buffer(bh)) {
1702 redirty_page_for_writepage(wbc, page);
1703 continue;
1705 if (test_clear_buffer_dirty(bh)) {
1706 mark_buffer_async_write_endio(bh, handler);
1707 } else {
1708 unlock_buffer(bh);
1710 } while ((bh = bh->b_this_page) != head);
1713 * The page and its buffers are protected by PageWriteback(), so we can
1714 * drop the bh refcounts early.
1716 BUG_ON(PageWriteback(page));
1717 set_page_writeback(page);
1719 do {
1720 struct buffer_head *next = bh->b_this_page;
1721 if (buffer_async_write(bh)) {
1722 submit_bh(write_op, bh);
1723 nr_underway++;
1725 bh = next;
1726 } while (bh != head);
1727 unlock_page(page);
1729 err = 0;
1730 done:
1731 if (nr_underway == 0) {
1733 * The page was marked dirty, but the buffers were
1734 * clean. Someone wrote them back by hand with
1735 * ll_rw_block/submit_bh. A rare case.
1737 end_page_writeback(page);
1740 * The page and buffer_heads can be released at any time from
1741 * here on.
1744 return err;
1746 recover:
1748 * ENOSPC, or some other error. We may already have added some
1749 * blocks to the file, so we need to write these out to avoid
1750 * exposing stale data.
1751 * The page is currently locked and not marked for writeback
1753 bh = head;
1754 /* Recovery: lock and submit the mapped buffers */
1755 do {
1756 if (buffer_mapped(bh) && buffer_dirty(bh) &&
1757 !buffer_delay(bh)) {
1758 lock_buffer(bh);
1759 mark_buffer_async_write_endio(bh, handler);
1760 } else {
1762 * The buffer may have been set dirty during
1763 * attachment to a dirty page.
1765 clear_buffer_dirty(bh);
1767 } while ((bh = bh->b_this_page) != head);
1768 SetPageError(page);
1769 BUG_ON(PageWriteback(page));
1770 mapping_set_error(page->mapping, err);
1771 set_page_writeback(page);
1772 do {
1773 struct buffer_head *next = bh->b_this_page;
1774 if (buffer_async_write(bh)) {
1775 clear_buffer_dirty(bh);
1776 submit_bh(write_op, bh);
1777 nr_underway++;
1779 bh = next;
1780 } while (bh != head);
1781 unlock_page(page);
1782 goto done;
1786 * If a page has any new buffers, zero them out here, and mark them uptodate
1787 * and dirty so they'll be written out (in order to prevent uninitialised
1788 * block data from leaking). And clear the new bit.
1790 void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1792 unsigned int block_start, block_end;
1793 struct buffer_head *head, *bh;
1795 BUG_ON(!PageLocked(page));
1796 if (!page_has_buffers(page))
1797 return;
1799 bh = head = page_buffers(page);
1800 block_start = 0;
1801 do {
1802 block_end = block_start + bh->b_size;
1804 if (buffer_new(bh)) {
1805 if (block_end > from && block_start < to) {
1806 if (!PageUptodate(page)) {
1807 unsigned start, size;
1809 start = max(from, block_start);
1810 size = min(to, block_end) - start;
1812 zero_user(page, start, size);
1813 set_buffer_uptodate(bh);
1816 clear_buffer_new(bh);
1817 mark_buffer_dirty(bh);
1821 block_start = block_end;
1822 bh = bh->b_this_page;
1823 } while (bh != head);
1825 EXPORT_SYMBOL(page_zero_new_buffers);
1827 int __block_write_begin(struct page *page, loff_t pos, unsigned len,
1828 get_block_t *get_block)
1830 unsigned from = pos & (PAGE_CACHE_SIZE - 1);
1831 unsigned to = from + len;
1832 struct inode *inode = page->mapping->host;
1833 unsigned block_start, block_end;
1834 sector_t block;
1835 int err = 0;
1836 unsigned blocksize, bbits;
1837 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1839 BUG_ON(!PageLocked(page));
1840 BUG_ON(from > PAGE_CACHE_SIZE);
1841 BUG_ON(to > PAGE_CACHE_SIZE);
1842 BUG_ON(from > to);
1844 blocksize = 1 << inode->i_blkbits;
1845 if (!page_has_buffers(page))
1846 create_empty_buffers(page, blocksize, 0);
1847 head = page_buffers(page);
1849 bbits = inode->i_blkbits;
1850 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1852 for(bh = head, block_start = 0; bh != head || !block_start;
1853 block++, block_start=block_end, bh = bh->b_this_page) {
1854 block_end = block_start + blocksize;
1855 if (block_end <= from || block_start >= to) {
1856 if (PageUptodate(page)) {
1857 if (!buffer_uptodate(bh))
1858 set_buffer_uptodate(bh);
1860 continue;
1862 if (buffer_new(bh))
1863 clear_buffer_new(bh);
1864 if (!buffer_mapped(bh)) {
1865 WARN_ON(bh->b_size != blocksize);
1866 err = get_block(inode, block, bh, 1);
1867 if (err)
1868 break;
1869 if (buffer_new(bh)) {
1870 unmap_underlying_metadata(bh->b_bdev,
1871 bh->b_blocknr);
1872 if (PageUptodate(page)) {
1873 clear_buffer_new(bh);
1874 set_buffer_uptodate(bh);
1875 mark_buffer_dirty(bh);
1876 continue;
1878 if (block_end > to || block_start < from)
1879 zero_user_segments(page,
1880 to, block_end,
1881 block_start, from);
1882 continue;
1885 if (PageUptodate(page)) {
1886 if (!buffer_uptodate(bh))
1887 set_buffer_uptodate(bh);
1888 continue;
1890 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1891 !buffer_unwritten(bh) &&
1892 (block_start < from || block_end > to)) {
1893 ll_rw_block(READ, 1, &bh);
1894 *wait_bh++=bh;
1898 * If we issued read requests - let them complete.
1900 while(wait_bh > wait) {
1901 wait_on_buffer(*--wait_bh);
1902 if (!buffer_uptodate(*wait_bh))
1903 err = -EIO;
1905 if (unlikely(err)) {
1906 page_zero_new_buffers(page, from, to);
1907 ClearPageUptodate(page);
1909 return err;
1911 EXPORT_SYMBOL(__block_write_begin);
1913 static int __block_commit_write(struct inode *inode, struct page *page,
1914 unsigned from, unsigned to)
1916 unsigned block_start, block_end;
1917 int partial = 0;
1918 unsigned blocksize;
1919 struct buffer_head *bh, *head;
1921 blocksize = 1 << inode->i_blkbits;
1923 for(bh = head = page_buffers(page), block_start = 0;
1924 bh != head || !block_start;
1925 block_start=block_end, bh = bh->b_this_page) {
1926 block_end = block_start + blocksize;
1927 if (block_end <= from || block_start >= to) {
1928 if (!buffer_uptodate(bh))
1929 partial = 1;
1930 } else {
1931 set_buffer_uptodate(bh);
1932 mark_buffer_dirty(bh);
1934 clear_buffer_new(bh);
1938 * If this is a partial write which happened to make all buffers
1939 * uptodate then we can optimize away a bogus readpage() for
1940 * the next read(). Here we 'discover' whether the page went
1941 * uptodate as a result of this (potentially partial) write.
1943 if (!partial)
1944 SetPageUptodate(page);
1945 return 0;
1949 * block_write_begin takes care of the basic task of block allocation and
1950 * bringing partial write blocks uptodate first.
1952 * The filesystem needs to handle block truncation upon failure.
1954 int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
1955 unsigned flags, struct page **pagep, get_block_t *get_block)
1957 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
1958 struct page *page;
1959 int status;
1961 page = grab_cache_page_write_begin(mapping, index, flags);
1962 if (!page)
1963 return -ENOMEM;
1965 status = __block_write_begin(page, pos, len, get_block);
1966 if (unlikely(status)) {
1967 unlock_page(page);
1968 page_cache_release(page);
1969 page = NULL;
1972 *pagep = page;
1973 return status;
1975 EXPORT_SYMBOL(block_write_begin);
1977 int block_write_end(struct file *file, struct address_space *mapping,
1978 loff_t pos, unsigned len, unsigned copied,
1979 struct page *page, void *fsdata)
1981 struct inode *inode = mapping->host;
1982 unsigned start;
1984 start = pos & (PAGE_CACHE_SIZE - 1);
1986 if (unlikely(copied < len)) {
1988 * The buffers that were written will now be uptodate, so we
1989 * don't have to worry about a readpage reading them and
1990 * overwriting a partial write. However if we have encountered
1991 * a short write and only partially written into a buffer, it
1992 * will not be marked uptodate, so a readpage might come in and
1993 * destroy our partial write.
1995 * Do the simplest thing, and just treat any short write to a
1996 * non uptodate page as a zero-length write, and force the
1997 * caller to redo the whole thing.
1999 if (!PageUptodate(page))
2000 copied = 0;
2002 page_zero_new_buffers(page, start+copied, start+len);
2004 flush_dcache_page(page);
2006 /* This could be a short (even 0-length) commit */
2007 __block_commit_write(inode, page, start, start+copied);
2009 return copied;
2011 EXPORT_SYMBOL(block_write_end);
2013 int generic_write_end(struct file *file, struct address_space *mapping,
2014 loff_t pos, unsigned len, unsigned copied,
2015 struct page *page, void *fsdata)
2017 struct inode *inode = mapping->host;
2018 int i_size_changed = 0;
2020 copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2023 * No need to use i_size_read() here, the i_size
2024 * cannot change under us because we hold i_mutex.
2026 * But it's important to update i_size while still holding page lock:
2027 * page writeout could otherwise come in and zero beyond i_size.
2029 if (pos+copied > inode->i_size) {
2030 i_size_write(inode, pos+copied);
2031 i_size_changed = 1;
2034 unlock_page(page);
2035 page_cache_release(page);
2038 * Don't mark the inode dirty under page lock. First, it unnecessarily
2039 * makes the holding time of page lock longer. Second, it forces lock
2040 * ordering of page lock and transaction start for journaling
2041 * filesystems.
2043 if (i_size_changed)
2044 mark_inode_dirty(inode);
2046 return copied;
2048 EXPORT_SYMBOL(generic_write_end);
2051 * block_is_partially_uptodate checks whether buffers within a page are
2052 * uptodate or not.
2054 * Returns true if all buffers which correspond to a file portion
2055 * we want to read are uptodate.
2057 int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
2058 unsigned long from)
2060 struct inode *inode = page->mapping->host;
2061 unsigned block_start, block_end, blocksize;
2062 unsigned to;
2063 struct buffer_head *bh, *head;
2064 int ret = 1;
2066 if (!page_has_buffers(page))
2067 return 0;
2069 blocksize = 1 << inode->i_blkbits;
2070 to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count);
2071 to = from + to;
2072 if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
2073 return 0;
2075 head = page_buffers(page);
2076 bh = head;
2077 block_start = 0;
2078 do {
2079 block_end = block_start + blocksize;
2080 if (block_end > from && block_start < to) {
2081 if (!buffer_uptodate(bh)) {
2082 ret = 0;
2083 break;
2085 if (block_end >= to)
2086 break;
2088 block_start = block_end;
2089 bh = bh->b_this_page;
2090 } while (bh != head);
2092 return ret;
2094 EXPORT_SYMBOL(block_is_partially_uptodate);
2097 * Generic "read page" function for block devices that have the normal
2098 * get_block functionality. This is most of the block device filesystems.
2099 * Reads the page asynchronously --- the unlock_buffer() and
2100 * set/clear_buffer_uptodate() functions propagate buffer state into the
2101 * page struct once IO has completed.
2103 int block_read_full_page(struct page *page, get_block_t *get_block)
2105 struct inode *inode = page->mapping->host;
2106 sector_t iblock, lblock;
2107 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2108 unsigned int blocksize;
2109 int nr, i;
2110 int fully_mapped = 1;
2112 BUG_ON(!PageLocked(page));
2113 blocksize = 1 << inode->i_blkbits;
2114 if (!page_has_buffers(page))
2115 create_empty_buffers(page, blocksize, 0);
2116 head = page_buffers(page);
2118 iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2119 lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2120 bh = head;
2121 nr = 0;
2122 i = 0;
2124 do {
2125 if (buffer_uptodate(bh))
2126 continue;
2128 if (!buffer_mapped(bh)) {
2129 int err = 0;
2131 fully_mapped = 0;
2132 if (iblock < lblock) {
2133 WARN_ON(bh->b_size != blocksize);
2134 err = get_block(inode, iblock, bh, 0);
2135 if (err)
2136 SetPageError(page);
2138 if (!buffer_mapped(bh)) {
2139 zero_user(page, i * blocksize, blocksize);
2140 if (!err)
2141 set_buffer_uptodate(bh);
2142 continue;
2145 * get_block() might have updated the buffer
2146 * synchronously
2148 if (buffer_uptodate(bh))
2149 continue;
2151 arr[nr++] = bh;
2152 } while (i++, iblock++, (bh = bh->b_this_page) != head);
2154 if (fully_mapped)
2155 SetPageMappedToDisk(page);
2157 if (!nr) {
2159 * All buffers are uptodate - we can set the page uptodate
2160 * as well. But not if get_block() returned an error.
2162 if (!PageError(page))
2163 SetPageUptodate(page);
2164 unlock_page(page);
2165 return 0;
2168 /* Stage two: lock the buffers */
2169 for (i = 0; i < nr; i++) {
2170 bh = arr[i];
2171 lock_buffer(bh);
2172 mark_buffer_async_read(bh);
2176 * Stage 3: start the IO. Check for uptodateness
2177 * inside the buffer lock in case another process reading
2178 * the underlying blockdev brought it uptodate (the sct fix).
2180 for (i = 0; i < nr; i++) {
2181 bh = arr[i];
2182 if (buffer_uptodate(bh))
2183 end_buffer_async_read(bh, 1);
2184 else
2185 submit_bh(READ, bh);
2187 return 0;
2189 EXPORT_SYMBOL(block_read_full_page);
2191 /* utility function for filesystems that need to do work on expanding
2192 * truncates. Uses filesystem pagecache writes to allow the filesystem to
2193 * deal with the hole.
2195 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2197 struct address_space *mapping = inode->i_mapping;
2198 struct page *page;
2199 void *fsdata;
2200 int err;
2202 err = inode_newsize_ok(inode, size);
2203 if (err)
2204 goto out;
2206 err = pagecache_write_begin(NULL, mapping, size, 0,
2207 AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
2208 &page, &fsdata);
2209 if (err)
2210 goto out;
2212 err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
2213 BUG_ON(err > 0);
2215 out:
2216 return err;
2218 EXPORT_SYMBOL(generic_cont_expand_simple);
2220 static int cont_expand_zero(struct file *file, struct address_space *mapping,
2221 loff_t pos, loff_t *bytes)
2223 struct inode *inode = mapping->host;
2224 unsigned blocksize = 1 << inode->i_blkbits;
2225 struct page *page;
2226 void *fsdata;
2227 pgoff_t index, curidx;
2228 loff_t curpos;
2229 unsigned zerofrom, offset, len;
2230 int err = 0;
2232 index = pos >> PAGE_CACHE_SHIFT;
2233 offset = pos & ~PAGE_CACHE_MASK;
2235 while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
2236 zerofrom = curpos & ~PAGE_CACHE_MASK;
2237 if (zerofrom & (blocksize-1)) {
2238 *bytes |= (blocksize-1);
2239 (*bytes)++;
2241 len = PAGE_CACHE_SIZE - zerofrom;
2243 err = pagecache_write_begin(file, mapping, curpos, len,
2244 AOP_FLAG_UNINTERRUPTIBLE,
2245 &page, &fsdata);
2246 if (err)
2247 goto out;
2248 zero_user(page, zerofrom, len);
2249 err = pagecache_write_end(file, mapping, curpos, len, len,
2250 page, fsdata);
2251 if (err < 0)
2252 goto out;
2253 BUG_ON(err != len);
2254 err = 0;
2256 balance_dirty_pages_ratelimited(mapping);
2259 /* page covers the boundary, find the boundary offset */
2260 if (index == curidx) {
2261 zerofrom = curpos & ~PAGE_CACHE_MASK;
2262 /* if we will expand the thing last block will be filled */
2263 if (offset <= zerofrom) {
2264 goto out;
2266 if (zerofrom & (blocksize-1)) {
2267 *bytes |= (blocksize-1);
2268 (*bytes)++;
2270 len = offset - zerofrom;
2272 err = pagecache_write_begin(file, mapping, curpos, len,
2273 AOP_FLAG_UNINTERRUPTIBLE,
2274 &page, &fsdata);
2275 if (err)
2276 goto out;
2277 zero_user(page, zerofrom, len);
2278 err = pagecache_write_end(file, mapping, curpos, len, len,
2279 page, fsdata);
2280 if (err < 0)
2281 goto out;
2282 BUG_ON(err != len);
2283 err = 0;
2285 out:
2286 return err;
2290 * For moronic filesystems that do not allow holes in file.
2291 * We may have to extend the file.
2293 int cont_write_begin(struct file *file, struct address_space *mapping,
2294 loff_t pos, unsigned len, unsigned flags,
2295 struct page **pagep, void **fsdata,
2296 get_block_t *get_block, loff_t *bytes)
2298 struct inode *inode = mapping->host;
2299 unsigned blocksize = 1 << inode->i_blkbits;
2300 unsigned zerofrom;
2301 int err;
2303 err = cont_expand_zero(file, mapping, pos, bytes);
2304 if (err)
2305 return err;
2307 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2308 if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2309 *bytes |= (blocksize-1);
2310 (*bytes)++;
2313 return block_write_begin(mapping, pos, len, flags, pagep, get_block);
2315 EXPORT_SYMBOL(cont_write_begin);
2317 int block_commit_write(struct page *page, unsigned from, unsigned to)
2319 struct inode *inode = page->mapping->host;
2320 __block_commit_write(inode,page,from,to);
2321 return 0;
2323 EXPORT_SYMBOL(block_commit_write);
2326 * block_page_mkwrite() is not allowed to change the file size as it gets
2327 * called from a page fault handler when a page is first dirtied. Hence we must
2328 * be careful to check for EOF conditions here. We set the page up correctly
2329 * for a written page which means we get ENOSPC checking when writing into
2330 * holes and correct delalloc and unwritten extent mapping on filesystems that
2331 * support these features.
2333 * We are not allowed to take the i_mutex here so we have to play games to
2334 * protect against truncate races as the page could now be beyond EOF. Because
2335 * truncate writes the inode size before removing pages, once we have the
2336 * page lock we can determine safely if the page is beyond EOF. If it is not
2337 * beyond EOF, then the page is guaranteed safe against truncation until we
2338 * unlock the page.
2340 * Direct callers of this function should call vfs_check_frozen() so that page
2341 * fault does not busyloop until the fs is thawed.
2343 int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
2344 get_block_t get_block)
2346 struct page *page = vmf->page;
2347 struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
2348 unsigned long end;
2349 loff_t size;
2350 int ret;
2352 lock_page(page);
2353 size = i_size_read(inode);
2354 if ((page->mapping != inode->i_mapping) ||
2355 (page_offset(page) > size)) {
2356 /* We overload EFAULT to mean page got truncated */
2357 ret = -EFAULT;
2358 goto out_unlock;
2361 /* page is wholly or partially inside EOF */
2362 if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
2363 end = size & ~PAGE_CACHE_MASK;
2364 else
2365 end = PAGE_CACHE_SIZE;
2367 ret = __block_write_begin(page, 0, end, get_block);
2368 if (!ret)
2369 ret = block_commit_write(page, 0, end);
2371 if (unlikely(ret < 0))
2372 goto out_unlock;
2374 * Freezing in progress? We check after the page is marked dirty and
2375 * with page lock held so if the test here fails, we are sure freezing
2376 * code will wait during syncing until the page fault is done - at that
2377 * point page will be dirty and unlocked so freezing code will write it
2378 * and writeprotect it again.
2380 set_page_dirty(page);
2381 if (inode->i_sb->s_frozen != SB_UNFROZEN) {
2382 ret = -EAGAIN;
2383 goto out_unlock;
2385 return 0;
2386 out_unlock:
2387 unlock_page(page);
2388 return ret;
2390 EXPORT_SYMBOL(__block_page_mkwrite);
2392 int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
2393 get_block_t get_block)
2395 int ret;
2396 struct super_block *sb = vma->vm_file->f_path.dentry->d_inode->i_sb;
2399 * This check is racy but catches the common case. The check in
2400 * __block_page_mkwrite() is reliable.
2402 vfs_check_frozen(sb, SB_FREEZE_WRITE);
2403 ret = __block_page_mkwrite(vma, vmf, get_block);
2404 return block_page_mkwrite_return(ret);
2406 EXPORT_SYMBOL(block_page_mkwrite);
2409 * nobh_write_begin()'s prereads are special: the buffer_heads are freed
2410 * immediately, while under the page lock. So it needs a special end_io
2411 * handler which does not touch the bh after unlocking it.
2413 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2415 __end_buffer_read_notouch(bh, uptodate);
2419 * Attach the singly-linked list of buffers created by nobh_write_begin, to
2420 * the page (converting it to circular linked list and taking care of page
2421 * dirty races).
2423 static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
2425 struct buffer_head *bh;
2427 BUG_ON(!PageLocked(page));
2429 spin_lock(&page->mapping->private_lock);
2430 bh = head;
2431 do {
2432 if (PageDirty(page))
2433 set_buffer_dirty(bh);
2434 if (!bh->b_this_page)
2435 bh->b_this_page = head;
2436 bh = bh->b_this_page;
2437 } while (bh != head);
2438 attach_page_buffers(page, head);
2439 spin_unlock(&page->mapping->private_lock);
2443 * On entry, the page is fully not uptodate.
2444 * On exit the page is fully uptodate in the areas outside (from,to)
2445 * The filesystem needs to handle block truncation upon failure.
2447 int nobh_write_begin(struct address_space *mapping,
2448 loff_t pos, unsigned len, unsigned flags,
2449 struct page **pagep, void **fsdata,
2450 get_block_t *get_block)
2452 struct inode *inode = mapping->host;
2453 const unsigned blkbits = inode->i_blkbits;
2454 const unsigned blocksize = 1 << blkbits;
2455 struct buffer_head *head, *bh;
2456 struct page *page;
2457 pgoff_t index;
2458 unsigned from, to;
2459 unsigned block_in_page;
2460 unsigned block_start, block_end;
2461 sector_t block_in_file;
2462 int nr_reads = 0;
2463 int ret = 0;
2464 int is_mapped_to_disk = 1;
2466 index = pos >> PAGE_CACHE_SHIFT;
2467 from = pos & (PAGE_CACHE_SIZE - 1);
2468 to = from + len;
2470 page = grab_cache_page_write_begin(mapping, index, flags);
2471 if (!page)
2472 return -ENOMEM;
2473 *pagep = page;
2474 *fsdata = NULL;
2476 if (page_has_buffers(page)) {
2477 ret = __block_write_begin(page, pos, len, get_block);
2478 if (unlikely(ret))
2479 goto out_release;
2480 return ret;
2483 if (PageMappedToDisk(page))
2484 return 0;
2487 * Allocate buffers so that we can keep track of state, and potentially
2488 * attach them to the page if an error occurs. In the common case of
2489 * no error, they will just be freed again without ever being attached
2490 * to the page (which is all OK, because we're under the page lock).
2492 * Be careful: the buffer linked list is a NULL terminated one, rather
2493 * than the circular one we're used to.
2495 head = alloc_page_buffers(page, blocksize, 0);
2496 if (!head) {
2497 ret = -ENOMEM;
2498 goto out_release;
2501 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2504 * We loop across all blocks in the page, whether or not they are
2505 * part of the affected region. This is so we can discover if the
2506 * page is fully mapped-to-disk.
2508 for (block_start = 0, block_in_page = 0, bh = head;
2509 block_start < PAGE_CACHE_SIZE;
2510 block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
2511 int create;
2513 block_end = block_start + blocksize;
2514 bh->b_state = 0;
2515 create = 1;
2516 if (block_start >= to)
2517 create = 0;
2518 ret = get_block(inode, block_in_file + block_in_page,
2519 bh, create);
2520 if (ret)
2521 goto failed;
2522 if (!buffer_mapped(bh))
2523 is_mapped_to_disk = 0;
2524 if (buffer_new(bh))
2525 unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
2526 if (PageUptodate(page)) {
2527 set_buffer_uptodate(bh);
2528 continue;
2530 if (buffer_new(bh) || !buffer_mapped(bh)) {
2531 zero_user_segments(page, block_start, from,
2532 to, block_end);
2533 continue;
2535 if (buffer_uptodate(bh))
2536 continue; /* reiserfs does this */
2537 if (block_start < from || block_end > to) {
2538 lock_buffer(bh);
2539 bh->b_end_io = end_buffer_read_nobh;
2540 submit_bh(READ, bh);
2541 nr_reads++;
2545 if (nr_reads) {
2547 * The page is locked, so these buffers are protected from
2548 * any VM or truncate activity. Hence we don't need to care
2549 * for the buffer_head refcounts.
2551 for (bh = head; bh; bh = bh->b_this_page) {
2552 wait_on_buffer(bh);
2553 if (!buffer_uptodate(bh))
2554 ret = -EIO;
2556 if (ret)
2557 goto failed;
2560 if (is_mapped_to_disk)
2561 SetPageMappedToDisk(page);
2563 *fsdata = head; /* to be released by nobh_write_end */
2565 return 0;
2567 failed:
2568 BUG_ON(!ret);
2570 * Error recovery is a bit difficult. We need to zero out blocks that
2571 * were newly allocated, and dirty them to ensure they get written out.
2572 * Buffers need to be attached to the page at this point, otherwise
2573 * the handling of potential IO errors during writeout would be hard
2574 * (could try doing synchronous writeout, but what if that fails too?)
2576 attach_nobh_buffers(page, head);
2577 page_zero_new_buffers(page, from, to);
2579 out_release:
2580 unlock_page(page);
2581 page_cache_release(page);
2582 *pagep = NULL;
2584 return ret;
2586 EXPORT_SYMBOL(nobh_write_begin);
2588 int nobh_write_end(struct file *file, struct address_space *mapping,
2589 loff_t pos, unsigned len, unsigned copied,
2590 struct page *page, void *fsdata)
2592 struct inode *inode = page->mapping->host;
2593 struct buffer_head *head = fsdata;
2594 struct buffer_head *bh;
2595 BUG_ON(fsdata != NULL && page_has_buffers(page));
2597 if (unlikely(copied < len) && head)
2598 attach_nobh_buffers(page, head);
2599 if (page_has_buffers(page))
2600 return generic_write_end(file, mapping, pos, len,
2601 copied, page, fsdata);
2603 SetPageUptodate(page);
2604 set_page_dirty(page);
2605 if (pos+copied > inode->i_size) {
2606 i_size_write(inode, pos+copied);
2607 mark_inode_dirty(inode);
2610 unlock_page(page);
2611 page_cache_release(page);
2613 while (head) {
2614 bh = head;
2615 head = head->b_this_page;
2616 free_buffer_head(bh);
2619 return copied;
2621 EXPORT_SYMBOL(nobh_write_end);
2624 * nobh_writepage() - based on block_full_write_page() except
2625 * that it tries to operate without attaching bufferheads to
2626 * the page.
2628 int nobh_writepage(struct page *page, get_block_t *get_block,
2629 struct writeback_control *wbc)
2631 struct inode * const inode = page->mapping->host;
2632 loff_t i_size = i_size_read(inode);
2633 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2634 unsigned offset;
2635 int ret;
2637 /* Is the page fully inside i_size? */
2638 if (page->index < end_index)
2639 goto out;
2641 /* Is the page fully outside i_size? (truncate in progress) */
2642 offset = i_size & (PAGE_CACHE_SIZE-1);
2643 if (page->index >= end_index+1 || !offset) {
2645 * The page may have dirty, unmapped buffers. For example,
2646 * they may have been added in ext3_writepage(). Make them
2647 * freeable here, so the page does not leak.
2649 #if 0
2650 /* Not really sure about this - do we need this ? */
2651 if (page->mapping->a_ops->invalidatepage)
2652 page->mapping->a_ops->invalidatepage(page, offset);
2653 #endif
2654 unlock_page(page);
2655 return 0; /* don't care */
2659 * The page straddles i_size. It must be zeroed out on each and every
2660 * writepage invocation because it may be mmapped. "A file is mapped
2661 * in multiples of the page size. For a file that is not a multiple of
2662 * the page size, the remaining memory is zeroed when mapped, and
2663 * writes to that region are not written out to the file."
2665 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2666 out:
2667 ret = mpage_writepage(page, get_block, wbc);
2668 if (ret == -EAGAIN)
2669 ret = __block_write_full_page(inode, page, get_block, wbc,
2670 end_buffer_async_write);
2671 return ret;
2673 EXPORT_SYMBOL(nobh_writepage);
2675 int nobh_truncate_page(struct address_space *mapping,
2676 loff_t from, get_block_t *get_block)
2678 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2679 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2680 unsigned blocksize;
2681 sector_t iblock;
2682 unsigned length, pos;
2683 struct inode *inode = mapping->host;
2684 struct page *page;
2685 struct buffer_head map_bh;
2686 int err;
2688 blocksize = 1 << inode->i_blkbits;
2689 length = offset & (blocksize - 1);
2691 /* Block boundary? Nothing to do */
2692 if (!length)
2693 return 0;
2695 length = blocksize - length;
2696 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2698 page = grab_cache_page(mapping, index);
2699 err = -ENOMEM;
2700 if (!page)
2701 goto out;
2703 if (page_has_buffers(page)) {
2704 has_buffers:
2705 unlock_page(page);
2706 page_cache_release(page);
2707 return block_truncate_page(mapping, from, get_block);
2710 /* Find the buffer that contains "offset" */
2711 pos = blocksize;
2712 while (offset >= pos) {
2713 iblock++;
2714 pos += blocksize;
2717 map_bh.b_size = blocksize;
2718 map_bh.b_state = 0;
2719 err = get_block(inode, iblock, &map_bh, 0);
2720 if (err)
2721 goto unlock;
2722 /* unmapped? It's a hole - nothing to do */
2723 if (!buffer_mapped(&map_bh))
2724 goto unlock;
2726 /* Ok, it's mapped. Make sure it's up-to-date */
2727 if (!PageUptodate(page)) {
2728 err = mapping->a_ops->readpage(NULL, page);
2729 if (err) {
2730 page_cache_release(page);
2731 goto out;
2733 lock_page(page);
2734 if (!PageUptodate(page)) {
2735 err = -EIO;
2736 goto unlock;
2738 if (page_has_buffers(page))
2739 goto has_buffers;
2741 zero_user(page, offset, length);
2742 set_page_dirty(page);
2743 err = 0;
2745 unlock:
2746 unlock_page(page);
2747 page_cache_release(page);
2748 out:
2749 return err;
2751 EXPORT_SYMBOL(nobh_truncate_page);
2753 int block_truncate_page(struct address_space *mapping,
2754 loff_t from, get_block_t *get_block)
2756 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2757 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2758 unsigned blocksize;
2759 sector_t iblock;
2760 unsigned length, pos;
2761 struct inode *inode = mapping->host;
2762 struct page *page;
2763 struct buffer_head *bh;
2764 int err;
2766 blocksize = 1 << inode->i_blkbits;
2767 length = offset & (blocksize - 1);
2769 /* Block boundary? Nothing to do */
2770 if (!length)
2771 return 0;
2773 length = blocksize - length;
2774 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2776 page = grab_cache_page(mapping, index);
2777 err = -ENOMEM;
2778 if (!page)
2779 goto out;
2781 if (!page_has_buffers(page))
2782 create_empty_buffers(page, blocksize, 0);
2784 /* Find the buffer that contains "offset" */
2785 bh = page_buffers(page);
2786 pos = blocksize;
2787 while (offset >= pos) {
2788 bh = bh->b_this_page;
2789 iblock++;
2790 pos += blocksize;
2793 err = 0;
2794 if (!buffer_mapped(bh)) {
2795 WARN_ON(bh->b_size != blocksize);
2796 err = get_block(inode, iblock, bh, 0);
2797 if (err)
2798 goto unlock;
2799 /* unmapped? It's a hole - nothing to do */
2800 if (!buffer_mapped(bh))
2801 goto unlock;
2804 /* Ok, it's mapped. Make sure it's up-to-date */
2805 if (PageUptodate(page))
2806 set_buffer_uptodate(bh);
2808 if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2809 err = -EIO;
2810 ll_rw_block(READ, 1, &bh);
2811 wait_on_buffer(bh);
2812 /* Uhhuh. Read error. Complain and punt. */
2813 if (!buffer_uptodate(bh))
2814 goto unlock;
2817 zero_user(page, offset, length);
2818 mark_buffer_dirty(bh);
2819 err = 0;
2821 unlock:
2822 unlock_page(page);
2823 page_cache_release(page);
2824 out:
2825 return err;
2827 EXPORT_SYMBOL(block_truncate_page);
2830 * The generic ->writepage function for buffer-backed address_spaces
2831 * this form passes in the end_io handler used to finish the IO.
2833 int block_write_full_page_endio(struct page *page, get_block_t *get_block,
2834 struct writeback_control *wbc, bh_end_io_t *handler)
2836 struct inode * const inode = page->mapping->host;
2837 loff_t i_size = i_size_read(inode);
2838 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2839 unsigned offset;
2841 /* Is the page fully inside i_size? */
2842 if (page->index < end_index)
2843 return __block_write_full_page(inode, page, get_block, wbc,
2844 handler);
2846 /* Is the page fully outside i_size? (truncate in progress) */
2847 offset = i_size & (PAGE_CACHE_SIZE-1);
2848 if (page->index >= end_index+1 || !offset) {
2850 * The page may have dirty, unmapped buffers. For example,
2851 * they may have been added in ext3_writepage(). Make them
2852 * freeable here, so the page does not leak.
2854 do_invalidatepage(page, 0);
2855 unlock_page(page);
2856 return 0; /* don't care */
2860 * The page straddles i_size. It must be zeroed out on each and every
2861 * writepage invocation because it may be mmapped. "A file is mapped
2862 * in multiples of the page size. For a file that is not a multiple of
2863 * the page size, the remaining memory is zeroed when mapped, and
2864 * writes to that region are not written out to the file."
2866 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2867 return __block_write_full_page(inode, page, get_block, wbc, handler);
2869 EXPORT_SYMBOL(block_write_full_page_endio);
2872 * The generic ->writepage function for buffer-backed address_spaces
2874 int block_write_full_page(struct page *page, get_block_t *get_block,
2875 struct writeback_control *wbc)
2877 return block_write_full_page_endio(page, get_block, wbc,
2878 end_buffer_async_write);
2880 EXPORT_SYMBOL(block_write_full_page);
2882 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2883 get_block_t *get_block)
2885 struct buffer_head tmp;
2886 struct inode *inode = mapping->host;
2887 tmp.b_state = 0;
2888 tmp.b_blocknr = 0;
2889 tmp.b_size = 1 << inode->i_blkbits;
2890 get_block(inode, block, &tmp, 0);
2891 return tmp.b_blocknr;
2893 EXPORT_SYMBOL(generic_block_bmap);
2895 static void end_bio_bh_io_sync(struct bio *bio, int err)
2897 struct buffer_head *bh = bio->bi_private;
2899 if (err == -EOPNOTSUPP) {
2900 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2903 if (unlikely (test_bit(BIO_QUIET,&bio->bi_flags)))
2904 set_bit(BH_Quiet, &bh->b_state);
2906 bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2907 bio_put(bio);
2910 int submit_bh(int rw, struct buffer_head * bh)
2912 struct bio *bio;
2913 int ret = 0;
2915 BUG_ON(!buffer_locked(bh));
2916 BUG_ON(!buffer_mapped(bh));
2917 BUG_ON(!bh->b_end_io);
2918 BUG_ON(buffer_delay(bh));
2919 BUG_ON(buffer_unwritten(bh));
2922 * Only clear out a write error when rewriting
2924 if (test_set_buffer_req(bh) && (rw & WRITE))
2925 clear_buffer_write_io_error(bh);
2928 * from here on down, it's all bio -- do the initial mapping,
2929 * submit_bio -> generic_make_request may further map this bio around
2931 bio = bio_alloc(GFP_NOIO, 1);
2933 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2934 bio->bi_bdev = bh->b_bdev;
2935 bio->bi_io_vec[0].bv_page = bh->b_page;
2936 bio->bi_io_vec[0].bv_len = bh->b_size;
2937 bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2939 bio->bi_vcnt = 1;
2940 bio->bi_idx = 0;
2941 bio->bi_size = bh->b_size;
2943 bio->bi_end_io = end_bio_bh_io_sync;
2944 bio->bi_private = bh;
2946 bio_get(bio);
2947 submit_bio(rw, bio);
2949 if (bio_flagged(bio, BIO_EOPNOTSUPP))
2950 ret = -EOPNOTSUPP;
2952 bio_put(bio);
2953 return ret;
2955 EXPORT_SYMBOL(submit_bh);
2958 * ll_rw_block: low-level access to block devices (DEPRECATED)
2959 * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
2960 * @nr: number of &struct buffer_heads in the array
2961 * @bhs: array of pointers to &struct buffer_head
2963 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2964 * requests an I/O operation on them, either a %READ or a %WRITE. The third
2965 * %READA option is described in the documentation for generic_make_request()
2966 * which ll_rw_block() calls.
2968 * This function drops any buffer that it cannot get a lock on (with the
2969 * BH_Lock state bit), any buffer that appears to be clean when doing a write
2970 * request, and any buffer that appears to be up-to-date when doing read
2971 * request. Further it marks as clean buffers that are processed for
2972 * writing (the buffer cache won't assume that they are actually clean
2973 * until the buffer gets unlocked).
2975 * ll_rw_block sets b_end_io to simple completion handler that marks
2976 * the buffer up-to-date (if approriate), unlocks the buffer and wakes
2977 * any waiters.
2979 * All of the buffers must be for the same device, and must also be a
2980 * multiple of the current approved size for the device.
2982 void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2984 int i;
2986 for (i = 0; i < nr; i++) {
2987 struct buffer_head *bh = bhs[i];
2989 if (!trylock_buffer(bh))
2990 continue;
2991 if (rw == WRITE) {
2992 if (test_clear_buffer_dirty(bh)) {
2993 bh->b_end_io = end_buffer_write_sync;
2994 get_bh(bh);
2995 submit_bh(WRITE, bh);
2996 continue;
2998 } else {
2999 if (!buffer_uptodate(bh)) {
3000 bh->b_end_io = end_buffer_read_sync;
3001 get_bh(bh);
3002 submit_bh(rw, bh);
3003 continue;
3006 unlock_buffer(bh);
3009 EXPORT_SYMBOL(ll_rw_block);
3011 void write_dirty_buffer(struct buffer_head *bh, int rw)
3013 lock_buffer(bh);
3014 if (!test_clear_buffer_dirty(bh)) {
3015 unlock_buffer(bh);
3016 return;
3018 bh->b_end_io = end_buffer_write_sync;
3019 get_bh(bh);
3020 submit_bh(rw, bh);
3022 EXPORT_SYMBOL(write_dirty_buffer);
3025 * For a data-integrity writeout, we need to wait upon any in-progress I/O
3026 * and then start new I/O and then wait upon it. The caller must have a ref on
3027 * the buffer_head.
3029 int __sync_dirty_buffer(struct buffer_head *bh, int rw)
3031 int ret = 0;
3033 WARN_ON(atomic_read(&bh->b_count) < 1);
3034 lock_buffer(bh);
3035 if (test_clear_buffer_dirty(bh)) {
3036 get_bh(bh);
3037 bh->b_end_io = end_buffer_write_sync;
3038 ret = submit_bh(rw, bh);
3039 wait_on_buffer(bh);
3040 if (!ret && !buffer_uptodate(bh))
3041 ret = -EIO;
3042 } else {
3043 unlock_buffer(bh);
3045 return ret;
3047 EXPORT_SYMBOL(__sync_dirty_buffer);
3049 int sync_dirty_buffer(struct buffer_head *bh)
3051 return __sync_dirty_buffer(bh, WRITE_SYNC);
3053 EXPORT_SYMBOL(sync_dirty_buffer);
3056 * try_to_free_buffers() checks if all the buffers on this particular page
3057 * are unused, and releases them if so.
3059 * Exclusion against try_to_free_buffers may be obtained by either
3060 * locking the page or by holding its mapping's private_lock.
3062 * If the page is dirty but all the buffers are clean then we need to
3063 * be sure to mark the page clean as well. This is because the page
3064 * may be against a block device, and a later reattachment of buffers
3065 * to a dirty page will set *all* buffers dirty. Which would corrupt
3066 * filesystem data on the same device.
3068 * The same applies to regular filesystem pages: if all the buffers are
3069 * clean then we set the page clean and proceed. To do that, we require
3070 * total exclusion from __set_page_dirty_buffers(). That is obtained with
3071 * private_lock.
3073 * try_to_free_buffers() is non-blocking.
3075 static inline int buffer_busy(struct buffer_head *bh)
3077 return atomic_read(&bh->b_count) |
3078 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
3081 static int
3082 drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
3084 struct buffer_head *head = page_buffers(page);
3085 struct buffer_head *bh;
3087 bh = head;
3088 do {
3089 if (buffer_write_io_error(bh) && page->mapping)
3090 set_bit(AS_EIO, &page->mapping->flags);
3091 if (buffer_busy(bh))
3092 goto failed;
3093 bh = bh->b_this_page;
3094 } while (bh != head);
3096 do {
3097 struct buffer_head *next = bh->b_this_page;
3099 if (bh->b_assoc_map)
3100 __remove_assoc_queue(bh);
3101 bh = next;
3102 } while (bh != head);
3103 *buffers_to_free = head;
3104 __clear_page_buffers(page);
3105 return 1;
3106 failed:
3107 return 0;
3110 int try_to_free_buffers(struct page *page)
3112 struct address_space * const mapping = page->mapping;
3113 struct buffer_head *buffers_to_free = NULL;
3114 int ret = 0;
3116 BUG_ON(!PageLocked(page));
3117 if (PageWriteback(page))
3118 return 0;
3120 if (mapping == NULL) { /* can this still happen? */
3121 ret = drop_buffers(page, &buffers_to_free);
3122 goto out;
3125 spin_lock(&mapping->private_lock);
3126 ret = drop_buffers(page, &buffers_to_free);
3129 * If the filesystem writes its buffers by hand (eg ext3)
3130 * then we can have clean buffers against a dirty page. We
3131 * clean the page here; otherwise the VM will never notice
3132 * that the filesystem did any IO at all.
3134 * Also, during truncate, discard_buffer will have marked all
3135 * the page's buffers clean. We discover that here and clean
3136 * the page also.
3138 * private_lock must be held over this entire operation in order
3139 * to synchronise against __set_page_dirty_buffers and prevent the
3140 * dirty bit from being lost.
3142 if (ret)
3143 cancel_dirty_page(page, PAGE_CACHE_SIZE);
3144 spin_unlock(&mapping->private_lock);
3145 out:
3146 if (buffers_to_free) {
3147 struct buffer_head *bh = buffers_to_free;
3149 do {
3150 struct buffer_head *next = bh->b_this_page;
3151 free_buffer_head(bh);
3152 bh = next;
3153 } while (bh != buffers_to_free);
3155 return ret;
3157 EXPORT_SYMBOL(try_to_free_buffers);
3160 * There are no bdflush tunables left. But distributions are
3161 * still running obsolete flush daemons, so we terminate them here.
3163 * Use of bdflush() is deprecated and will be removed in a future kernel.
3164 * The `flush-X' kernel threads fully replace bdflush daemons and this call.
3166 SYSCALL_DEFINE2(bdflush, int, func, long, data)
3168 static int msg_count;
3170 if (!capable(CAP_SYS_ADMIN))
3171 return -EPERM;
3173 if (msg_count < 5) {
3174 msg_count++;
3175 printk(KERN_INFO
3176 "warning: process `%s' used the obsolete bdflush"
3177 " system call\n", current->comm);
3178 printk(KERN_INFO "Fix your initscripts?\n");
3181 if (func == 1)
3182 do_exit(0);
3183 return 0;
3187 * Buffer-head allocation
3189 static struct kmem_cache *bh_cachep;
3192 * Once the number of bh's in the machine exceeds this level, we start
3193 * stripping them in writeback.
3195 static int max_buffer_heads;
3197 int buffer_heads_over_limit;
3199 struct bh_accounting {
3200 int nr; /* Number of live bh's */
3201 int ratelimit; /* Limit cacheline bouncing */
3204 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3206 static void recalc_bh_state(void)
3208 int i;
3209 int tot = 0;
3211 if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096)
3212 return;
3213 __this_cpu_write(bh_accounting.ratelimit, 0);
3214 for_each_online_cpu(i)
3215 tot += per_cpu(bh_accounting, i).nr;
3216 buffer_heads_over_limit = (tot > max_buffer_heads);
3219 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
3221 struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
3222 if (ret) {
3223 INIT_LIST_HEAD(&ret->b_assoc_buffers);
3224 preempt_disable();
3225 __this_cpu_inc(bh_accounting.nr);
3226 recalc_bh_state();
3227 preempt_enable();
3229 return ret;
3231 EXPORT_SYMBOL(alloc_buffer_head);
3233 void free_buffer_head(struct buffer_head *bh)
3235 BUG_ON(!list_empty(&bh->b_assoc_buffers));
3236 kmem_cache_free(bh_cachep, bh);
3237 preempt_disable();
3238 __this_cpu_dec(bh_accounting.nr);
3239 recalc_bh_state();
3240 preempt_enable();
3242 EXPORT_SYMBOL(free_buffer_head);
3244 static void buffer_exit_cpu(int cpu)
3246 int i;
3247 struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3249 for (i = 0; i < BH_LRU_SIZE; i++) {
3250 brelse(b->bhs[i]);
3251 b->bhs[i] = NULL;
3253 this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
3254 per_cpu(bh_accounting, cpu).nr = 0;
3257 static int buffer_cpu_notify(struct notifier_block *self,
3258 unsigned long action, void *hcpu)
3260 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
3261 buffer_exit_cpu((unsigned long)hcpu);
3262 return NOTIFY_OK;
3266 * bh_uptodate_or_lock - Test whether the buffer is uptodate
3267 * @bh: struct buffer_head
3269 * Return true if the buffer is up-to-date and false,
3270 * with the buffer locked, if not.
3272 int bh_uptodate_or_lock(struct buffer_head *bh)
3274 if (!buffer_uptodate(bh)) {
3275 lock_buffer(bh);
3276 if (!buffer_uptodate(bh))
3277 return 0;
3278 unlock_buffer(bh);
3280 return 1;
3282 EXPORT_SYMBOL(bh_uptodate_or_lock);
3285 * bh_submit_read - Submit a locked buffer for reading
3286 * @bh: struct buffer_head
3288 * Returns zero on success and -EIO on error.
3290 int bh_submit_read(struct buffer_head *bh)
3292 BUG_ON(!buffer_locked(bh));
3294 if (buffer_uptodate(bh)) {
3295 unlock_buffer(bh);
3296 return 0;
3299 get_bh(bh);
3300 bh->b_end_io = end_buffer_read_sync;
3301 submit_bh(READ, bh);
3302 wait_on_buffer(bh);
3303 if (buffer_uptodate(bh))
3304 return 0;
3305 return -EIO;
3307 EXPORT_SYMBOL(bh_submit_read);
3309 void __init buffer_init(void)
3311 int nrpages;
3313 bh_cachep = kmem_cache_create("buffer_head",
3314 sizeof(struct buffer_head), 0,
3315 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3316 SLAB_MEM_SPREAD),
3317 NULL);
3320 * Limit the bh occupancy to 10% of ZONE_NORMAL
3322 nrpages = (nr_free_buffer_pages() * 10) / 100;
3323 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3324 hotcpu_notifier(buffer_cpu_notify, 0);