/*
 * fs/mpage.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains functions related to preparing and submitting BIOs which contain
 * multiple pagecache pages.
 *
 * 15May2002	akpm@zip.com.au
 *		Initial version
 * 27Jun2002	axboe@suse.de
 *		use bio_add_page() to build bio's just the right size
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kdev_t.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/prefetch.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_page().
 *
 * Why is this?  If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static int mpage_end_io_read(struct bio *bio, unsigned int bytes_done, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	if (bio->bi_size)
		return 1;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	} while (bvec >= bio->bi_io_vec);
	bio_put(bio);
	return 0;
}
static int mpage_end_io_write(struct bio *bio, unsigned int bytes_done, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	if (bio->bi_size)
		return 1;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (!uptodate) {
			SetPageError(page);
			if (page->mapping)
				set_bit(AS_EIO, &page->mapping->flags);
		}
		end_page_writeback(page);
	} while (bvec >= bio->bi_io_vec);
	bio_put(bio);
	return 0;
}
static struct bio *mpage_bio_submit(int rw, struct bio *bio)
{
	if (rw == READ)
		bio->bi_end_io = mpage_end_io_read;
	else
		bio->bi_end_io = mpage_end_io_write;
	submit_bio(rw, bio);
	return NULL;
}
static struct bio *
mpage_alloc(struct block_device *bdev,
		sector_t first_sector, int nr_vecs,
		unsigned int __nocast gfp_flags)
{
	struct bio *bio;

	bio = bio_alloc(gfp_flags, nr_vecs);

	/*
	 * Memory allocators must not fail here: retry with progressively
	 * smaller vector counts.
	 */
	if (bio == NULL && (current->flags & PF_MEMALLOC)) {
		while (!bio && (nr_vecs /= 2))
			bio = bio_alloc(gfp_flags, nr_vecs);
	}

	if (bio) {
		bio->bi_bdev = bdev;
		bio->bi_sector = first_sector;
	}
	return bio;
}
/*
 * support function for mpage_readpages.  The fs supplied get_block might
 * return an up to date buffer.  This is used to map that buffer into
 * the page, which allows readpage to avoid triggering a duplicate call
 * to get_block.
 *
 * The idea is to avoid adding buffers to pages that don't already have
 * them.  So when the buffer is up to date and the page size == block size,
 * this marks the page up to date instead of adding new buffers.
 */
static void
map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block)
{
	struct inode *inode = page->mapping->host;
	struct buffer_head *page_bh, *head;
	int block = 0;

	if (!page_has_buffers(page)) {
		/*
		 * don't make any buffers if there is only one buffer on
		 * the page and the page just needs to be set up to date
		 */
		if (inode->i_blkbits == PAGE_CACHE_SHIFT &&
		    buffer_uptodate(bh)) {
			SetPageUptodate(page);
			return;
		}
		create_empty_buffers(page, 1 << inode->i_blkbits, 0);
	}
	head = page_buffers(page);
	page_bh = head;
	do {
		if (block == page_block) {
			page_bh->b_state = bh->b_state;
			page_bh->b_bdev = bh->b_bdev;
			page_bh->b_blocknr = bh->b_blocknr;
			break;
		}
		page_bh = page_bh->b_this_page;
		block++;
	} while (page_bh != head);
}
static struct bio *
do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
		sector_t *last_block_in_bio, get_block_t get_block)
{
	struct inode *inode = page->mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	unsigned first_hole = blocks_per_page;
	struct block_device *bdev = NULL;
	struct buffer_head bh;
	int length;
	int fully_mapped = 1;

	if (page_has_buffers(page))
		goto confused;

	block_in_file = page->index << (PAGE_CACHE_SHIFT - blkbits);
	last_block = (i_size_read(inode) + blocksize - 1) >> blkbits;

	bh.b_page = page;
	for (page_block = 0; page_block < blocks_per_page;
				page_block++, block_in_file++) {
		bh.b_state = 0;
		if (block_in_file < last_block) {
			if (get_block(inode, block_in_file, &bh, 0))
				goto confused;
		}

		if (!buffer_mapped(&bh)) {
			fully_mapped = 0;
			if (first_hole == blocks_per_page)
				first_hole = page_block;
			continue;
		}

		/* some filesystems will copy data into the page during
		 * the get_block call, in which case we don't want to
		 * read it again.  map_buffer_to_page copies the data
		 * we just collected from get_block into the page's buffers
		 * so readpage doesn't have to repeat the get_block call
		 */
		if (buffer_uptodate(&bh)) {
			map_buffer_to_page(page, &bh, page_block);
			goto confused;
		}

		if (first_hole != blocks_per_page)
			goto confused;		/* hole -> non-hole */

		/* Contiguous blocks? */
		if (page_block && blocks[page_block-1] != bh.b_blocknr-1)
			goto confused;
		blocks[page_block] = bh.b_blocknr;
		bdev = bh.b_bdev;
	}

	if (first_hole != blocks_per_page) {
		char *kaddr = kmap_atomic(page, KM_USER0);
		memset(kaddr + (first_hole << blkbits), 0,
				PAGE_CACHE_SIZE - (first_hole << blkbits));
		flush_dcache_page(page);
		kunmap_atomic(kaddr, KM_USER0);
		if (first_hole == 0) {
			SetPageUptodate(page);
			unlock_page(page);
			goto out;
		}
	} else if (fully_mapped) {
		SetPageMappedToDisk(page);
	}

	/*
	 * This page will go to BIO.  Do we need to send this BIO off first?
	 */
	if (bio && (*last_block_in_bio != blocks[0] - 1))
		bio = mpage_bio_submit(READ, bio);

alloc_new:
	if (bio == NULL) {
		bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
				min_t(int, nr_pages, bio_get_nr_vecs(bdev)),
				GFP_KERNEL);
		if (bio == NULL)
			goto confused;
	}

	length = first_hole << blkbits;
	if (bio_add_page(bio, page, length, 0) < length) {
		bio = mpage_bio_submit(READ, bio);
		goto alloc_new;
	}

	if (buffer_boundary(&bh) || (first_hole != blocks_per_page))
		bio = mpage_bio_submit(READ, bio);
	else
		*last_block_in_bio = blocks[blocks_per_page - 1];
out:
	return bio;

confused:
	if (bio)
		bio = mpage_bio_submit(READ, bio);
	if (!PageUptodate(page))
		block_read_full_page(page, get_block);
	else
		unlock_page(page);
	goto out;
}
/**
 * mpage_readpages - populate an address space with some pages, and
 *                   start reads against them.
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 *
 *   The page at @pages->prev has the lowest file offset, and reads should be
 *   issued in @pages->prev to @pages->next order.
 *
 * @nr_pages: The number of pages at *@pages
 * @get_block: The filesystem's block mapper function.
 *
 * This function walks the pages and the blocks within each page, building and
 * emitting large BIOs.
 *
 * If anything unusual happens, such as:
 *
 * - encountering a page which has buffers
 * - encountering a page which has a non-hole after a hole
 * - encountering a page with non-contiguous blocks
 *
 * then this code just gives up and calls the buffer_head-based read function.
 * It does handle a page which has holes at the end - that is a common case:
 * the end-of-file on blocksize < PAGE_CACHE_SIZE setups.
 *
 * BH_Boundary explanation:
 *
 * There is a problem.  The mpage read code assembles several pages, gets all
 * their disk mappings, and then submits them all.  That's fine, but obtaining
 * the disk mappings may require I/O.  Reads of indirect blocks, for example.
 *
 * So an mpage read of the first 16 blocks of an ext2 file will cause I/O to be
 * submitted in the following order:
 *	12 0 1 2 3 4 5 6 7 8 9 10 11 13 14 15 16
 * because the indirect block has to be read to get the mappings of blocks
 * 13,14,15,16.  Obviously, this impacts performance.
 *
 * So what we do is allow the filesystem's get_block() function to set
 * BH_Boundary when it maps block 11.  BH_Boundary says: mapping of the block
 * after this one will require I/O against a block which is probably close to
 * this one.  So you should push what I/O you have currently accumulated.
 *
 * This all causes the disk requests to be issued in the correct order.
 */
int
mpage_readpages(struct address_space *mapping, struct list_head *pages,
				unsigned nr_pages, get_block_t get_block)
{
	struct bio *bio = NULL;
	unsigned page_idx;
	sector_t last_block_in_bio = 0;
	struct pagevec lru_pvec;

	pagevec_init(&lru_pvec, 0);
	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		struct page *page = list_entry(pages->prev, struct page, lru);

		prefetchw(&page->flags);
		list_del(&page->lru);
		if (!add_to_page_cache(page, mapping,
					page->index, GFP_KERNEL)) {
			bio = do_mpage_readpage(bio, page,
					nr_pages - page_idx,
					&last_block_in_bio, get_block);
			if (!pagevec_add(&lru_pvec, page))
				__pagevec_lru_add(&lru_pvec);
		} else {
			page_cache_release(page);
		}
	}
	pagevec_lru_add(&lru_pvec);
	BUG_ON(!list_empty(pages));
	if (bio)
		mpage_bio_submit(READ, bio);
	return 0;
}
EXPORT_SYMBOL(mpage_readpages);
/*
 * This isn't called much at all
 */
int mpage_readpage(struct page *page, get_block_t get_block)
{
	struct bio *bio = NULL;
	sector_t last_block_in_bio = 0;

	bio = do_mpage_readpage(bio, page, 1,
			&last_block_in_bio, get_block);
	if (bio)
		mpage_bio_submit(READ, bio);
	return 0;
}
EXPORT_SYMBOL(mpage_readpage);
/*
 * Writing is not so simple.
 *
 * If the page has buffers then they will be used for obtaining the disk
 * mapping.  We only support pages which are fully mapped-and-dirty, with a
 * special case for pages which are unmapped at the end: end-of-file.
 *
 * If the page has no buffers (preferred) then the page is mapped here.
 *
 * If all blocks are found to be contiguous then the page can go into the
 * BIO.  Otherwise fall back to the mapping's writepage().
 *
 * FIXME: This code wants an estimate of how many pages are still to be
 * written, so it can intelligently allocate a suitably-sized BIO.  For now,
 * just allocate full-size (16-page) BIOs.
 */
static struct bio *
__mpage_writepage(struct bio *bio, struct page *page, get_block_t get_block,
	sector_t *last_block_in_bio, int *ret, struct writeback_control *wbc,
	writepage_t writepage_fn)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = page->mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	unsigned long end_index;
	const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits;
	sector_t last_block;
	sector_t block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	unsigned first_unmapped = blocks_per_page;
	struct block_device *bdev = NULL;
	int boundary = 0;
	sector_t boundary_block = 0;
	struct block_device *boundary_bdev = NULL;
	int length;
	struct buffer_head map_bh;
	loff_t i_size = i_size_read(inode);

	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		/* If they're all mapped and dirty, do it */
		page_block = 0;
		do {
			BUG_ON(buffer_locked(bh));
			if (!buffer_mapped(bh)) {
				/*
				 * unmapped dirty buffers are created by
				 * __set_page_dirty_buffers -> mmapped data
				 */
				if (buffer_dirty(bh))
					goto confused;
				if (first_unmapped == blocks_per_page)
					first_unmapped = page_block;
				continue;
			}

			if (first_unmapped != blocks_per_page)
				goto confused;	/* hole -> non-hole */

			if (!buffer_dirty(bh) || !buffer_uptodate(bh))
				goto confused;
			if (page_block) {
				if (bh->b_blocknr != blocks[page_block-1] + 1)
					goto confused;
			}
			blocks[page_block++] = bh->b_blocknr;
			boundary = buffer_boundary(bh);
			if (boundary) {
				boundary_block = bh->b_blocknr;
				boundary_bdev = bh->b_bdev;
			}
			bdev = bh->b_bdev;
		} while ((bh = bh->b_this_page) != head);

		if (first_unmapped)
			goto page_is_mapped;

		/*
		 * Page has buffers, but they are all unmapped. The page was
		 * created by pagein or read over a hole which was handled by
		 * block_read_full_page().  If this address_space is also
		 * using mpage_readpages then this can rarely happen.
		 */
		goto confused;
	}

	/*
	 * The page has no buffers: map it to disk
	 */
	BUG_ON(!PageUptodate(page));
	block_in_file = page->index << (PAGE_CACHE_SHIFT - blkbits);
	last_block = (i_size - 1) >> blkbits;
	map_bh.b_page = page;
	for (page_block = 0; page_block < blocks_per_page; ) {

		map_bh.b_state = 0;
		if (get_block(inode, block_in_file, &map_bh, 1))
			goto confused;
		if (buffer_new(&map_bh))
			unmap_underlying_metadata(map_bh.b_bdev,
						map_bh.b_blocknr);
		if (buffer_boundary(&map_bh)) {
			boundary_block = map_bh.b_blocknr;
			boundary_bdev = map_bh.b_bdev;
		}
		if (page_block) {
			if (map_bh.b_blocknr != blocks[page_block-1] + 1)
				goto confused;
		}
		blocks[page_block++] = map_bh.b_blocknr;
		boundary = buffer_boundary(&map_bh);
		bdev = map_bh.b_bdev;
		if (block_in_file == last_block)
			break;
		block_in_file++;
	}
	BUG_ON(page_block == 0);

	first_unmapped = page_block;

page_is_mapped:
	end_index = i_size >> PAGE_CACHE_SHIFT;
	if (page->index >= end_index) {
		/*
		 * The page straddles i_size.  It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size.  For a file
		 * that is not a multiple of the page size, the remaining memory
		 * is zeroed when mapped, and writes to that region are not
		 * written out to the file."
		 */
		unsigned offset = i_size & (PAGE_CACHE_SIZE - 1);
		char *kaddr;

		if (page->index > end_index || !offset)
			goto confused;
		kaddr = kmap_atomic(page, KM_USER0);
		memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
		flush_dcache_page(page);
		kunmap_atomic(kaddr, KM_USER0);
	}

	/*
	 * This page will go to BIO.  Do we need to send this BIO off first?
	 */
	if (bio && *last_block_in_bio != blocks[0] - 1)
		bio = mpage_bio_submit(WRITE, bio);

alloc_new:
	if (bio == NULL) {
		bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
				bio_get_nr_vecs(bdev), GFP_NOFS|__GFP_HIGH);
		if (bio == NULL)
			goto confused;
	}

	/*
	 * Must try to add the page before marking the buffer clean or
	 * the confused fail path above (OOM) will be very confused when
	 * it finds all bh marked clean (i.e. it will not write anything)
	 */
	length = first_unmapped << blkbits;
	if (bio_add_page(bio, page, length, 0) < length) {
		bio = mpage_bio_submit(WRITE, bio);
		goto alloc_new;
	}

	/*
	 * OK, we have our BIO, so we can now mark the buffers clean.  Make
	 * sure to only clean buffers which we know we'll be writing.
	 */
	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;
		unsigned buffer_counter = 0;

		do {
			if (buffer_counter++ == first_unmapped)
				break;
			clear_buffer_dirty(bh);
			bh = bh->b_this_page;
		} while (bh != head);

		/*
		 * we cannot drop the bh if the page is not uptodate
		 * or a concurrent readpage would fail to serialize with the bh
		 * and it would read from disk before we reach the platter.
		 */
		if (buffer_heads_over_limit && PageUptodate(page))
			try_to_free_buffers(page);
	}

	BUG_ON(PageWriteback(page));
	set_page_writeback(page);
	unlock_page(page);
	if (boundary || (first_unmapped != blocks_per_page)) {
		bio = mpage_bio_submit(WRITE, bio);
		if (boundary_block) {
			write_boundary_block(boundary_bdev,
					boundary_block, 1 << blkbits);
		}
	} else {
		*last_block_in_bio = blocks[blocks_per_page - 1];
	}
	goto out;

confused:
	if (bio)
		bio = mpage_bio_submit(WRITE, bio);

	if (writepage_fn) {
		*ret = (*writepage_fn)(page, wbc);
	} else {
		*ret = -EAGAIN;
		goto out;
	}
	/*
	 * The caller has a ref on the inode, so *mapping is stable
	 */
	if (*ret) {
		if (*ret == -ENOSPC)
			set_bit(AS_ENOSPC, &mapping->flags);
		else
			set_bit(AS_EIO, &mapping->flags);
	}
out:
	return bio;
}
/**
 * mpage_writepages - walk the list of dirty pages of the given
 * address space and writepage() all of them.
 *
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 * @get_block: the filesystem's block mapper function.
 *             If this is NULL then use a_ops->writepage.  Otherwise, go
 *             direct-to-BIO.
 *
 * This is a library function, which implements the writepages()
 * address_space_operation.
 *
 * If a page is already under I/O, generic_writepages() skips it, even
 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
 * and msync() need to guarantee that all the data which was dirty at the time
 * the call was made gets new I/O started against it.  If wbc->sync_mode is
 * WB_SYNC_ALL then we were called for data integrity and we must wait for
 * existing IO to complete.
 */
int
mpage_writepages(struct address_space *mapping,
		struct writeback_control *wbc, get_block_t get_block)
{
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	struct bio *bio = NULL;
	sector_t last_block_in_bio = 0;
	int ret = 0;
	int done = 0;
	int (*writepage)(struct page *page, struct writeback_control *wbc);
	struct pagevec pvec;
	int nr_pages;
	pgoff_t index;
	pgoff_t end = -1;		/* Inclusive */
	int scanned = 0;
	int is_range = 0;

	if (wbc->nonblocking && bdi_write_congested(bdi)) {
		wbc->encountered_congestion = 1;
		return 0;
	}

	writepage = NULL;
	if (get_block == NULL)
		writepage = mapping->a_ops->writepage;

	pagevec_init(&pvec, 0);
	if (wbc->sync_mode == WB_SYNC_NONE) {
		index = mapping->writeback_index; /* Start from prev offset */
	} else {
		index = 0;			  /* whole-file sweep */
		scanned = 1;
	}
	if (wbc->start || wbc->end) {
		index = wbc->start >> PAGE_CACHE_SHIFT;
		end = wbc->end >> PAGE_CACHE_SHIFT;
		is_range = 1;
		scanned = 1;
	}
retry:
	while (!done && (index <= end) &&
			(nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
			PAGECACHE_TAG_DIRTY,
			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
		unsigned i;

		scanned = 1;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * At this point we hold neither mapping->tree_lock nor
			 * lock on the page itself: the page may be truncated or
			 * invalidated (changing page->mapping to NULL), or even
			 * swizzled back from swapper_space to tmpfs file
			 * mapping
			 */

			lock_page(page);

			if (unlikely(page->mapping != mapping)) {
				unlock_page(page);
				continue;
			}

			if (unlikely(is_range) && page->index > end) {
				done = 1;
				unlock_page(page);
				continue;
			}

			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);

			if (PageWriteback(page) ||
					!clear_page_dirty_for_io(page)) {
				unlock_page(page);
				continue;
			}

			if (writepage) {
				ret = (*writepage)(page, wbc);
				if (ret) {
					if (ret == -ENOSPC)
						set_bit(AS_ENOSPC,
							&mapping->flags);
					else
						set_bit(AS_EIO,
							&mapping->flags);
				}
			} else {
				bio = __mpage_writepage(bio, page, get_block,
						&last_block_in_bio, &ret, wbc,
						page->mapping->a_ops->writepage);
			}
			if (unlikely(ret == WRITEPAGE_ACTIVATE))
				unlock_page(page);
			if (ret || (--(wbc->nr_to_write) <= 0))
				done = 1;
			if (wbc->nonblocking && bdi_write_congested(bdi)) {
				wbc->encountered_congestion = 1;
				done = 1;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = 1;
		index = 0;
		goto retry;
	}
	if (!is_range)
		mapping->writeback_index = index;
	if (bio)
		mpage_bio_submit(WRITE, bio);
	return ret;
}
EXPORT_SYMBOL(mpage_writepages);
int mpage_writepage(struct page *page, get_block_t get_block,
	struct writeback_control *wbc)
{
	int ret = 0;
	struct bio *bio;
	sector_t last_block_in_bio = 0;

	bio = __mpage_writepage(NULL, page, get_block,
			&last_block_in_bio, &ret, wbc, NULL);
	if (bio)
		mpage_bio_submit(WRITE, bio);
	return ret;
}
EXPORT_SYMBOL(mpage_writepage);