/*
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains functions related to preparing and submitting BIOs which contain
 * multiple pagecache pages.
 *
 * 15May2002	akpm@zip.com.au
 * 27Jun2002	axboe@suse.de
 *		use bio_add_page() to build bio's just the right size
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kdev_t.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/prefetch.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_page().
 *
 * Why is this?  If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static int mpage_end_io_read(struct bio *bio, unsigned int bytes_done, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	if (bio->bi_size)
		return 1;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	} while (bvec >= bio->bi_io_vec);
	bio_put(bio);
	return 0;
}
static int mpage_end_io_write(struct bio *bio, unsigned int bytes_done, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	if (bio->bi_size)
		return 1;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (!uptodate)
			SetPageError(page);
		end_page_writeback(page);
	} while (bvec >= bio->bi_io_vec);
	bio_put(bio);
	return 0;
}
struct bio *mpage_bio_submit(int rw, struct bio *bio)
{
	bio->bi_end_io = mpage_end_io_read;
	if (rw == WRITE)
		bio->bi_end_io = mpage_end_io_write;
	submit_bio(rw, bio);
	return NULL;
}
static struct bio *
mpage_alloc(struct block_device *bdev,
		sector_t first_sector, int nr_vecs, int gfp_flags)
{
	struct bio *bio;

	bio = bio_alloc(gfp_flags, nr_vecs);

	if (bio == NULL && (current->flags & PF_MEMALLOC)) {
		while (!bio && (nr_vecs /= 2))
			bio = bio_alloc(gfp_flags, nr_vecs);
	}

	if (bio) {
		bio->bi_bdev = bdev;
		bio->bi_sector = first_sector;
	}
	return bio;
}
/*
 * support function for mpage_readpages.  The fs supplied get_block might
 * return an up to date buffer.  This is used to map that buffer into
 * the page, which allows readpage to avoid triggering a duplicate call
 * to get_block.
 *
 * The idea is to avoid adding buffers to pages that don't already have
 * them.  So when the buffer is up to date and the page size == block size,
 * this marks the page up to date instead of adding new buffers.
 */
static void
map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block)
{
	struct inode *inode = page->mapping->host;
	struct buffer_head *page_bh, *head;
	int block = 0;

	if (!page_has_buffers(page)) {
		/*
		 * don't make any buffers if there is only one buffer on
		 * the page and the page just needs to be set up to date
		 */
		if (inode->i_blkbits == PAGE_CACHE_SHIFT &&
		    buffer_uptodate(bh)) {
			SetPageUptodate(page);
			return;
		}
		create_empty_buffers(page, 1 << inode->i_blkbits, 0);
	}
	head = page_buffers(page);
	page_bh = head;
	do {
		if (block == page_block) {
			page_bh->b_state = bh->b_state;
			page_bh->b_bdev = bh->b_bdev;
			page_bh->b_blocknr = bh->b_blocknr;
			break;
		}
		page_bh = page_bh->b_this_page;
		block++;
	} while (page_bh != head);
}
/**
 * mpage_readpages - populate an address space with some pages, and
 * start reads against them.
 *
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 *
 *   The page at @pages->prev has the lowest file offset, and reads should be
 *   issued in @pages->prev to @pages->next order.
 *
 * @nr_pages: The number of pages at *@pages
 * @get_block: The filesystem's block mapper function.
 *
 * This function walks the pages and the blocks within each page, building and
 * emitting large BIOs.
 *
 * If anything unusual happens, such as:
 *
 * - encountering a page which has buffers
 * - encountering a page which has a non-hole after a hole
 * - encountering a page with non-contiguous blocks
 *
 * then this code just gives up and calls the buffer_head-based read function.
 * It does handle a page which has holes at the end - that is a common case:
 * the end-of-file on blocksize < PAGE_CACHE_SIZE setups.
 *
 * BH_Boundary explanation:
 *
 * There is a problem.  The mpage read code assembles several pages, gets all
 * their disk mappings, and then submits them all.  That's fine, but obtaining
 * the disk mappings may require I/O.  Reads of indirect blocks, for example.
 *
 * So an mpage read of the first 16 blocks of an ext2 file will cause I/O to be
 * submitted in the following order:
 *	12 0 1 2 3 4 5 6 7 8 9 10 11 13 14 15 16
 * because the indirect block has to be read to get the mappings of blocks
 * 13,14,15,16.  Obviously, this impacts performance.
 *
 * So what we do is allow the filesystem's get_block() function to set
 * BH_Boundary when it maps block 11.  BH_Boundary says: the mapping of the
 * block after this one will require I/O against a block which is probably
 * close to this one.  So you should push what I/O you have currently
 * accumulated.
 *
 * This all causes the disk requests to be issued in the correct order.
 */
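/*
 * Illustrative sketch only (not part of mpage.c, compiled out): roughly how
 * a filesystem's get_block() might cooperate with the BH_Boundary scheme
 * described above.  The function name and the example_map_block() helper
 * are hypothetical; a real filesystem (ext2, for instance) computes the
 * mapping from its own metadata and decides for itself when the next
 * logical block's mapping will require an indirect-block read.
 */
#if 0
static int example_get_block(struct inode *inode, sector_t iblock,
		struct buffer_head *bh_result, int create)
{
	sector_t phys;
	int last_before_indirect;

	/* hypothetical helper: map iblock to phys and report whether the
	 * next logical block's mapping will need metadata I/O */
	if (example_map_block(inode, iblock, &phys, &last_before_indirect))
		return -EIO;

	map_bh(bh_result, inode->i_sb, phys);
	if (last_before_indirect)
		set_buffer_boundary(bh_result);	/* push accumulated I/O now */
	return 0;
}
#endif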
static struct bio *
do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
		sector_t *last_block_in_bio, get_block_t get_block)
{
	struct inode *inode = page->mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	unsigned first_hole = blocks_per_page;
	struct block_device *bdev = NULL;
	struct buffer_head bh;
	int length;
	int fully_mapped = 1;

	if (page_has_buffers(page))
		goto confused;

	block_in_file = page->index << (PAGE_CACHE_SHIFT - blkbits);
	last_block = (i_size_read(inode) + blocksize - 1) >> blkbits;

	bh.b_page = page;
	for (page_block = 0; page_block < blocks_per_page;
				page_block++, block_in_file++) {
		bh.b_state = 0;
		if (block_in_file < last_block) {
			if (get_block(inode, block_in_file, &bh, 0))
				goto confused;
		}

		if (!buffer_mapped(&bh)) {
			fully_mapped = 0;
			if (first_hole == blocks_per_page)
				first_hole = page_block;
			continue;
		}

		/* some filesystems will copy data into the page during
		 * the get_block call, in which case we don't want to
		 * read it again.  map_buffer_to_page copies the data
		 * we just collected from get_block into the page's buffers
		 * so readpage doesn't have to repeat the get_block call
		 */
		if (buffer_uptodate(&bh)) {
			map_buffer_to_page(page, &bh, page_block);
			goto confused;
		}

		if (first_hole != blocks_per_page)
			goto confused;		/* hole -> non-hole */

		/* Contiguous blocks? */
		if (page_block && blocks[page_block-1] != bh.b_blocknr-1)
			goto confused;
		blocks[page_block] = bh.b_blocknr;
		bdev = bh.b_bdev;
	}

	if (first_hole != blocks_per_page) {
		char *kaddr = kmap_atomic(page, KM_USER0);
		memset(kaddr + (first_hole << blkbits), 0,
				PAGE_CACHE_SIZE - (first_hole << blkbits));
		flush_dcache_page(page);
		kunmap_atomic(kaddr, KM_USER0);
		if (first_hole == 0) {
			SetPageUptodate(page);
			unlock_page(page);
			goto out;
		}
	} else if (fully_mapped) {
		SetPageMappedToDisk(page);
	}

	/*
	 * This page will go to BIO.  Do we need to send this BIO off first?
	 */
	if (bio && (*last_block_in_bio != blocks[0] - 1))
		bio = mpage_bio_submit(READ, bio);

alloc_new:
	if (bio == NULL) {
		bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
					nr_pages, GFP_KERNEL);
		if (bio == NULL)
			goto confused;
	}

	length = first_hole << blkbits;
	if (bio_add_page(bio, page, length, 0) < length) {
		bio = mpage_bio_submit(READ, bio);
		goto alloc_new;
	}

	if (buffer_boundary(&bh) || (first_hole != blocks_per_page))
		bio = mpage_bio_submit(READ, bio);
	else
		*last_block_in_bio = blocks[blocks_per_page - 1];
out:
	return bio;

confused:
	if (bio)
		bio = mpage_bio_submit(READ, bio);
	if (!PageUptodate(page))
		block_read_full_page(page, get_block);
	else
		unlock_page(page);
	goto out;
}
int
mpage_readpages(struct address_space *mapping, struct list_head *pages,
				unsigned nr_pages, get_block_t get_block)
{
	struct bio *bio = NULL;
	unsigned page_idx;
	sector_t last_block_in_bio = 0;
	struct pagevec lru_pvec;

	pagevec_init(&lru_pvec, 0);
	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		struct page *page = list_entry(pages->prev, struct page, list);

		prefetchw(&page->flags);
		list_del(&page->list);
		if (!add_to_page_cache(page, mapping,
					page->index, GFP_KERNEL)) {
			bio = do_mpage_readpage(bio, page,
					nr_pages - page_idx,
					&last_block_in_bio, get_block);
			if (!pagevec_add(&lru_pvec, page))
				__pagevec_lru_add(&lru_pvec);
		} else {
			page_cache_release(page);
		}
	}
	pagevec_lru_add(&lru_pvec);
	BUG_ON(!list_empty(pages));
	if (bio)
		mpage_bio_submit(READ, bio);
	return 0;
}
EXPORT_SYMBOL(mpage_readpages);
/*
 * This isn't called much at all
 */
int mpage_readpage(struct page *page, get_block_t get_block)
{
	struct bio *bio = NULL;
	sector_t last_block_in_bio = 0;

	bio = do_mpage_readpage(bio, page, 1,
			&last_block_in_bio, get_block);
	if (bio)
		mpage_bio_submit(READ, bio);
	return 0;
}
EXPORT_SYMBOL(mpage_readpage);
/*
 * Writing is not so simple.
 *
 * If the page has buffers then they will be used for obtaining the disk
 * mapping.  We only support pages which are fully mapped-and-dirty, with a
 * special case for pages which are unmapped at the end: end-of-file.
 *
 * If the page has no buffers (preferred) then the page is mapped here.
 *
 * If all blocks are found to be contiguous then the page can go into the
 * BIO.  Otherwise fall back to the mapping's writepage().
 *
 * FIXME: This code wants an estimate of how many pages are still to be
 * written, so it can intelligently allocate a suitably-sized BIO.  For now,
 * just allocate full-size (16-page) BIOs.
 */
static struct bio *
mpage_writepage(struct bio *bio, struct page *page, get_block_t get_block,
	sector_t *last_block_in_bio, int *ret, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	unsigned long end_index;
	const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits;
	sector_t last_block;
	sector_t block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	unsigned first_unmapped = blocks_per_page;
	struct block_device *bdev = NULL;
	int boundary = 0;
	sector_t boundary_block = 0;
	struct block_device *boundary_bdev = NULL;
	int length;
	struct buffer_head map_bh;

	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		/* If they're all mapped and dirty, do it */
		page_block = 0;
		do {
			BUG_ON(buffer_locked(bh));
			if (!buffer_mapped(bh)) {
				/*
				 * unmapped dirty buffers are created by
				 * __set_page_dirty_buffers -> mmapped data
				 */
				if (buffer_dirty(bh))
					goto confused;
				if (first_unmapped == blocks_per_page)
					first_unmapped = page_block;
				continue;
			}

			if (first_unmapped != blocks_per_page)
				goto confused;	/* hole -> non-hole */

			if (!buffer_dirty(bh) || !buffer_uptodate(bh))
				goto confused;
			if (page_block) {
				if (bh->b_blocknr != blocks[page_block-1] + 1)
					goto confused;
			}
			blocks[page_block++] = bh->b_blocknr;
			boundary = buffer_boundary(bh);
			if (boundary) {
				boundary_block = bh->b_blocknr;
				boundary_bdev = bh->b_bdev;
			}
			bdev = bh->b_bdev;
		} while ((bh = bh->b_this_page) != head);

		if (first_unmapped)
			goto page_is_mapped;

		/*
		 * Page has buffers, but they are all unmapped. The page was
		 * created by pagein or read over a hole which was handled by
		 * block_read_full_page().  If this address_space is also
		 * using mpage_readpages then this can rarely happen.
		 */
		goto confused;
	}

	/*
	 * The page has no buffers: map it to disk
	 */
	BUG_ON(!PageUptodate(page));
	block_in_file = page->index << (PAGE_CACHE_SHIFT - blkbits);
	last_block = (i_size_read(inode) - 1) >> blkbits;
	map_bh.b_page = page;
	for (page_block = 0; page_block < blocks_per_page; ) {
		map_bh.b_state = 0;
		if (get_block(inode, block_in_file, &map_bh, 1))
			goto confused;
		if (buffer_new(&map_bh))
			unmap_underlying_metadata(map_bh.b_bdev,
						map_bh.b_blocknr);
		if (buffer_boundary(&map_bh)) {
			boundary_block = map_bh.b_blocknr;
			boundary_bdev = map_bh.b_bdev;
		}
		if (page_block) {
			if (map_bh.b_blocknr != blocks[page_block-1] + 1)
				goto confused;
		}
		blocks[page_block++] = map_bh.b_blocknr;
		boundary = buffer_boundary(&map_bh);
		bdev = map_bh.b_bdev;
		if (block_in_file == last_block)
			break;
		block_in_file++;
	}
	BUG_ON(page_block == 0);

	first_unmapped = page_block;

page_is_mapped:
	end_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;
	if (page->index >= end_index) {
		unsigned offset = i_size_read(inode) & (PAGE_CACHE_SIZE - 1);
		char *kaddr;

		if (page->index > end_index || !offset)
			goto confused;
		kaddr = kmap_atomic(page, KM_USER0);
		memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
		flush_dcache_page(page);
		kunmap_atomic(kaddr, KM_USER0);
	}

	/*
	 * This page will go to BIO.  Do we need to send this BIO off first?
	 */
	if (bio && *last_block_in_bio != blocks[0] - 1)
		bio = mpage_bio_submit(WRITE, bio);

alloc_new:
	if (bio == NULL) {
		bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
				bio_get_nr_vecs(bdev), GFP_NOFS|__GFP_HIGH);
		if (bio == NULL)
			goto confused;
	}

	/*
	 * OK, we have our BIO, so we can now mark the buffers clean.  Make
	 * sure to only clean buffers which we know we'll be writing.
	 */
	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;
		unsigned buffer_counter = 0;

		do {
			if (buffer_counter++ == first_unmapped)
				break;
			clear_buffer_dirty(bh);
			bh = bh->b_this_page;
		} while (bh != head);

		if (buffer_heads_over_limit)
			try_to_free_buffers(page);
	}

	length = first_unmapped << blkbits;
	if (bio_add_page(bio, page, length, 0) < length) {
		bio = mpage_bio_submit(WRITE, bio);
		goto alloc_new;
	}

	BUG_ON(PageWriteback(page));
	SetPageWriteback(page);
	unlock_page(page);
	if (boundary || (first_unmapped != blocks_per_page)) {
		bio = mpage_bio_submit(WRITE, bio);
		if (boundary_block) {
			write_boundary_block(boundary_bdev,
					boundary_block, 1 << blkbits);
		}
	} else {
		*last_block_in_bio = blocks[blocks_per_page - 1];
	}
	goto out;

confused:
	if (bio)
		bio = mpage_bio_submit(WRITE, bio);
	*ret = page->mapping->a_ops->writepage(page, wbc);
out:
	return bio;
}
/**
 * mpage_writepages - walk the list of dirty pages of the given
 * address space and writepage() all of them.
 *
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 * @get_block: the filesystem's block mapper function.
 *             If this is NULL then use a_ops->writepage.  Otherwise, go
 *             direct-to-BIO.
 *
 * This is a library function, which implements the writepages()
 * address_space_operation.
 *
 * (The next two paragraphs refer to code which isn't here yet, but they
 *  explain the presence of address_space.io_pages)
 *
 * Pages can be moved from clean_pages or locked_pages onto dirty_pages
 * at any time - it's not possible to lock against that.  So pages which
 * have already been added to a BIO may magically reappear on the dirty_pages
 * list.  And mpage_writepages() will again try to lock those pages.
 * But I/O has not yet been started against the page.  Thus deadlock.
 *
 * To avoid this, mpage_writepages() will only write pages from io_pages. The
 * caller must place them there.  We walk io_pages, locking the pages and
 * submitting them for I/O, moving them to locked_pages.
 *
 * This has the added benefit of preventing a livelock which would otherwise
 * occur if pages are being dirtied faster than we can write them out.
 *
 * If a page is already under I/O, generic_writepages() skips it, even
 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
 * but it is INCORRECT for data-integrity system calls such as fsync().
 * fsync() and msync() need to guarantee that all the data which was dirty
 * at the time the call was made gets new I/O started against it.  So if
 * called_for_sync() is true, we must wait for existing IO to complete.
 *
 * It's fairly rare for PageWriteback pages to be on ->dirty_pages.  It
 * means that someone redirtied the page while it was under I/O.
 */
int
mpage_writepages(struct address_space *mapping,
		struct writeback_control *wbc, get_block_t get_block)
{
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	struct bio *bio = NULL;
	sector_t last_block_in_bio = 0;
	int ret = 0;
	int done = 0;
	int (*writepage)(struct page *page, struct writeback_control *wbc);

	if (wbc->nonblocking && bdi_write_congested(bdi)) {
		wbc->encountered_congestion = 1;
		return 0;
	}

	writepage = NULL;
	if (get_block == NULL)
		writepage = mapping->a_ops->writepage;

	spin_lock(&mapping->page_lock);
	while (!list_empty(&mapping->io_pages) && !done) {
		struct page *page = list_entry(mapping->io_pages.prev,
					struct page, list);

		list_del(&page->list);
		if (PageWriteback(page) && wbc->sync_mode == WB_SYNC_NONE) {
			if (PageDirty(page)) {
				list_add(&page->list, &mapping->dirty_pages);
				continue;
			}
			list_add(&page->list, &mapping->locked_pages);
			continue;
		}
		if (!PageDirty(page)) {
			list_add(&page->list, &mapping->clean_pages);
			continue;
		}
		list_add(&page->list, &mapping->locked_pages);

		page_cache_get(page);
		spin_unlock(&mapping->page_lock);

		/*
		 * At this point we hold neither mapping->page_lock nor
		 * lock on the page itself: the page may be truncated or
		 * invalidated (changing page->mapping to NULL), or even
		 * swizzled back from swapper_space to tmpfs file mapping.
		 */
		lock_page(page);

		if (wbc->sync_mode != WB_SYNC_NONE)
			wait_on_page_writeback(page);

		if (page->mapping == mapping && !PageWriteback(page) &&
					test_clear_page_dirty(page)) {
			if (writepage) {
				ret = (*writepage)(page, wbc);
			} else {
				bio = mpage_writepage(bio, page, get_block,
						&last_block_in_bio, &ret, wbc);
			}
			if (ret || (--(wbc->nr_to_write) <= 0))
				done = 1;
			if (wbc->nonblocking && bdi_write_congested(bdi)) {
				wbc->encountered_congestion = 1;
				done = 1;
			}
		} else {
			unlock_page(page);
		}
		page_cache_release(page);
		spin_lock(&mapping->page_lock);
	}
	/*
	 * Leave any remaining dirty pages on ->io_pages
	 */
	spin_unlock(&mapping->page_lock);
	if (bio)
		mpage_bio_submit(WRITE, bio);
	return ret;
}
EXPORT_SYMBOL(mpage_writepages);