/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs_trans.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_iomap.h"
#include "xfs_vnodeops.h"
#include "xfs_trace.h"
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
void
xfs_count_page_state(
	struct page		*page,
	int			*delalloc,
	int			*unwritten)
{
	struct buffer_head	*bh, *head;

	*delalloc = *unwritten = 0;

	bh = head = page_buffers(page);
	do {
		if (buffer_unwritten(bh))
			(*unwritten) = 1;
		else if (buffer_delay(bh))
			(*delalloc) = 1;
	} while ((bh = bh->b_this_page) != head);
}
STATIC struct block_device *
xfs_find_bdev_for_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_bdev;
	else
		return mp->m_ddev_targp->bt_bdev;
}
/*
 * We're now finished for good with this ioend structure.
 * Update the page state via the associated buffer_heads,
 * release holds on the inode and bio, and finally free
 * up memory.  Do not use the ioend after this.
 */
STATIC void
xfs_destroy_ioend(
	xfs_ioend_t		*ioend)
{
	struct buffer_head	*bh, *next;

	for (bh = ioend->io_buffer_head; bh; bh = next) {
		next = bh->b_private;
		bh->b_end_io(bh, !ioend->io_error);
	}

	if (ioend->io_iocb) {
		if (ioend->io_isasync) {
			aio_complete(ioend->io_iocb, ioend->io_error ?
					ioend->io_error : ioend->io_result, 0);
		}
		inode_dio_done(ioend->io_inode);
	}

	mempool_free(ioend, xfs_ioend_pool);
}
/*
 * Fast and loose check if this write could update the on-disk inode size.
 */
static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
{
	return ioend->io_offset + ioend->io_size >
		XFS_I(ioend->io_inode)->i_d.di_size;
}
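/*
 * For example, an append that starts at the current on-disk size makes
 * io_offset + io_size exceed di_size and so needs a size update at I/O
 * completion, while a rewrite that ends below di_size does not.
 */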
/*
 * Update on-disk file size now that data has been written to disk.
 */
STATIC void
xfs_setfilesize(
	struct xfs_ioend	*ioend)
{
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	xfs_fsize_t		isize;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	isize = xfs_new_eof(ip, ioend->io_offset + ioend->io_size);
	if (isize) {
		trace_xfs_setfilesize(ip, ioend->io_offset, ioend->io_size);
		ip->i_d.di_size = isize;
		xfs_mark_inode_dirty(ip);
	}

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
}
/*
 * Schedule IO completion handling on the final put of an ioend.
 *
 * If there is no work to do we might as well call it a day and free the
 * ioend right now.
 */
STATIC void
xfs_finish_ioend(
	struct xfs_ioend	*ioend)
{
	if (atomic_dec_and_test(&ioend->io_remaining)) {
		struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;

		if (ioend->io_type == IO_UNWRITTEN)
			queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
		else if (xfs_ioend_is_append(ioend))
			queue_work(mp->m_data_workqueue, &ioend->io_work);
		else
			xfs_destroy_ioend(ioend);
	}
}
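/*
 * For buffered writeback, io_remaining acts as a reference count on the
 * ioend: xfs_alloc_ioend() sets it to 1, xfs_submit_ioend_bio() takes an
 * extra reference for each bio it submits, xfs_end_bio() drops one as each
 * bio completes, and xfs_submit_ioend() drops the initial reference once
 * all bios for the ioend have been issued. Whichever drop reaches zero
 * ends up here and either queues the completion work or destroys the
 * ioend directly.
 */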
/*
 * IO write completion.
 */
STATIC void
xfs_end_io(
	struct work_struct *work)
{
	xfs_ioend_t		*ioend = container_of(work, xfs_ioend_t, io_work);
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	int			error = 0;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		ioend->io_error = -EIO;
		goto done;
	}
	if (ioend->io_error)
		goto done;

	/*
	 * For unwritten extents we need to issue transactions to convert a
	 * range to normal written extents after the data I/O has finished.
	 */
	if (ioend->io_type == IO_UNWRITTEN) {
		error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
						 ioend->io_size);
		if (error) {
			ioend->io_error = -error;
			goto done;
		}
	}

	/*
	 * We might have to update the on-disk file size after extending
	 * writes.
	 */
	xfs_setfilesize(ioend);
done:
	xfs_destroy_ioend(ioend);
}
/*
 * Call IO completion handling in caller context on the final put of an ioend.
 */
STATIC void
xfs_finish_ioend_sync(
	struct xfs_ioend	*ioend)
{
	if (atomic_dec_and_test(&ioend->io_remaining))
		xfs_end_io(&ioend->io_work);
}
/*
 * Allocate and initialise an IO completion structure.
 * We need to track unwritten extent write completion here initially.
 * We'll need to extend this for updating the ondisk inode size later
 * (vs. incore size).
 */
STATIC xfs_ioend_t *
xfs_alloc_ioend(
	struct inode		*inode,
	unsigned int		type)
{
	xfs_ioend_t		*ioend;

	ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);

	/*
	 * Set the count to 1 initially, which will prevent an I/O
	 * completion callback from happening before we have started
	 * all the I/O from calling the completion routine too early.
	 */
	atomic_set(&ioend->io_remaining, 1);
	ioend->io_isasync = 0;
	ioend->io_error = 0;
	ioend->io_list = NULL;
	ioend->io_type = type;
	ioend->io_inode = inode;
	ioend->io_buffer_head = NULL;
	ioend->io_buffer_tail = NULL;
	ioend->io_offset = 0;
	ioend->io_size = 0;
	ioend->io_iocb = NULL;
	ioend->io_result = 0;

	INIT_WORK(&ioend->io_work, xfs_end_io);
	return ioend;
}
STATIC int
xfs_map_blocks(
	struct inode		*inode,
	loff_t			offset,
	struct xfs_bmbt_irec	*imap,
	int			type,
	int			nonblocking)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			count = 1 << inode->i_blkbits;
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			error = 0;
	int			bmapi_flags = XFS_BMAPI_ENTIRE;
	int			nimaps = 1;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -XFS_ERROR(EIO);

	if (type == IO_UNWRITTEN)
		bmapi_flags |= XFS_BMAPI_IGSTATE;

	if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
		if (nonblocking)
			return -XFS_ERROR(EAGAIN);
		xfs_ilock(ip, XFS_ILOCK_SHARED);
	}

	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       (ip->i_df.if_flags & XFS_IFEXTENTS));
	ASSERT(offset <= mp->m_maxioffset);

	if (offset + count > mp->m_maxioffset)
		count = mp->m_maxioffset - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
				imap, &nimaps, bmapi_flags);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (error)
		return -XFS_ERROR(error);

	if (type == IO_DELALLOC &&
	    (!nimaps || isnullstartblock(imap->br_startblock))) {
		error = xfs_iomap_write_allocate(ip, offset, count, imap);
		if (!error)
			trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);
		return -XFS_ERROR(error);
	}

#ifdef DEBUG
	if (type == IO_UNWRITTEN) {
		ASSERT(nimaps);
		ASSERT(imap->br_startblock != HOLESTARTBLOCK);
		ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
	}
#endif
	if (nimaps)
		trace_xfs_map_blocks_found(ip, offset, count, type, imap);
	return 0;
}
STATIC int
xfs_imap_valid(
	struct inode		*inode,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	offset >>= inode->i_blkbits;

	return offset >= imap->br_startoff &&
		offset < imap->br_startoff + imap->br_blockcount;
}
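/*
 * Example: with 4096-byte blocks (i_blkbits == 12), an imap with
 * br_startoff 8 and br_blockcount 4 covers byte offsets [32768, 49152);
 * offset 40960 shifts down to block 10 and is inside the mapping, while
 * offset 49152 shifts to block 12 and is not.
 */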
/*
 * BIO completion handler for buffered IO.
 */
STATIC void
xfs_end_bio(
	struct bio		*bio,
	int			error)
{
	xfs_ioend_t		*ioend = bio->bi_private;

	ASSERT(atomic_read(&bio->bi_cnt) >= 1);
	ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;

	/* Toss bio and pass work off to an xfsdatad thread */
	bio->bi_private = NULL;
	bio->bi_end_io = NULL;
	bio_put(bio);

	xfs_finish_ioend(ioend);
}
STATIC void
xfs_submit_ioend_bio(
	struct writeback_control *wbc,
	xfs_ioend_t		*ioend,
	struct bio		*bio)
{
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);

	atomic_inc(&ioend->io_remaining);
	bio->bi_private = ioend;
	bio->bi_end_io = xfs_end_bio;

	/*
	 * If the I/O is beyond EOF we mark the inode dirty immediately
	 * but don't update the inode size until I/O completion.
	 */
	if (xfs_new_eof(ip, ioend->io_offset + ioend->io_size))
		xfs_mark_inode_dirty(ip);

	submit_bio(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE, bio);
}
STATIC struct bio *
xfs_alloc_ioend_bio(
	struct buffer_head	*bh)
{
	int			nvecs = bio_get_nr_vecs(bh->b_bdev);
	struct bio		*bio = bio_alloc(GFP_NOIO, nvecs);

	ASSERT(bio->bi_private == NULL);
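	/*
	 * b_blocknr is in units of the buffer size; scaling by b_size >> 9
	 * converts it to the 512-byte sectors that bi_sector expects.
	 */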
	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	return bio;
}
STATIC void
xfs_start_buffer_writeback(
	struct buffer_head	*bh)
{
	ASSERT(buffer_mapped(bh));
	ASSERT(buffer_locked(bh));
	ASSERT(!buffer_delay(bh));
	ASSERT(!buffer_unwritten(bh));

	mark_buffer_async_write(bh);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
}
STATIC void
xfs_start_page_writeback(
	struct page		*page,
	int			clear_dirty,
	int			buffers)
{
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));
	if (clear_dirty)
		clear_page_dirty_for_io(page);
	set_page_writeback(page);
	unlock_page(page);
	/* If no buffers on the page are to be written, finish it here */
	if (!buffers)
		end_page_writeback(page);
}
static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
	return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}
/*
 * Submit all of the bios for all of the ioends we have saved up, covering the
 * initial writepage page and also any probed pages.
 *
 * Because we may have multiple ioends spanning a page, we need to start
 * writeback on all the buffers before we submit them for I/O. If we mark the
 * buffers as we process them, we can end up with a page that only has some of
 * its buffers marked async write, and I/O completion on those can occur before
 * we mark the remaining buffers async write.
 *
 * The end result of this is that we trip a bug in end_page_writeback() because
 * we call it twice for the one page as the code in end_buffer_async_write()
 * assumes that all buffers on the page are started at the same time.
 *
 * The fix is two passes across the ioend list - one to start writeback on the
 * buffer_heads, and then submit them for I/O on the second pass.
 */
STATIC void
xfs_submit_ioend(
	struct writeback_control *wbc,
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*head = ioend;
	xfs_ioend_t		*next;
	struct buffer_head	*bh;
	struct bio		*bio;
	sector_t		lastblock = 0;

	/* Pass 1 - start writeback */
	do {
		next = ioend->io_list;
		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private)
			xfs_start_buffer_writeback(bh);
	} while ((ioend = next) != NULL);

	/* Pass 2 - submit I/O */
	ioend = head;
	do {
		next = ioend->io_list;
		bio = NULL;

		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {

			if (!bio) {
 retry:
				bio = xfs_alloc_ioend_bio(bh);
			} else if (bh->b_blocknr != lastblock + 1) {
				xfs_submit_ioend_bio(wbc, ioend, bio);
				goto retry;
			}

			if (bio_add_buffer(bio, bh) != bh->b_size) {
				xfs_submit_ioend_bio(wbc, ioend, bio);
				goto retry;
			}

			lastblock = bh->b_blocknr;
		}
		if (bio)
			xfs_submit_ioend_bio(wbc, ioend, bio);
		xfs_finish_ioend(ioend);
	} while ((ioend = next) != NULL);
}
/*
 * Cancel submission of all buffer_heads so far in this endio.
 * Toss the endio too.  Only ever called for the initial page
 * in a writepage request, so only ever one page.
 */
STATIC void
xfs_cancel_ioend(
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*next;
	struct buffer_head	*bh, *next_bh;

	do {
		next = ioend->io_list;
		bh = ioend->io_buffer_head;
		do {
			next_bh = bh->b_private;
			clear_buffer_async_write(bh);
			unlock_buffer(bh);
		} while ((bh = next_bh) != NULL);

		mempool_free(ioend, xfs_ioend_pool);
	} while ((ioend = next) != NULL);
}
/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we
 * can, otherwise we finish off any current ioend and start another.
 * Return true if we've finished the given ioend.
 */
STATIC void
xfs_add_to_ioend(
	struct inode		*inode,
	struct buffer_head	*bh,
	xfs_off_t		offset,
	unsigned int		type,
	xfs_ioend_t		**result,
	int			need_ioend)
{
	xfs_ioend_t		*ioend = *result;

	if (!ioend || need_ioend || type != ioend->io_type) {
		xfs_ioend_t	*previous = *result;

		ioend = xfs_alloc_ioend(inode, type);
		ioend->io_offset = offset;
		ioend->io_buffer_head = bh;
		ioend->io_buffer_tail = bh;
		if (previous)
			previous->io_list = ioend;
		*result = ioend;
	} else {
		ioend->io_buffer_tail->b_private = bh;
		ioend->io_buffer_tail = bh;
	}

	bh->b_private = NULL;
	ioend->io_size += bh->b_size;
}
STATIC void
xfs_map_buffer(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	sector_t		bn;
	struct xfs_mount	*m = XFS_I(inode)->i_mount;
	xfs_off_t		iomap_offset = XFS_FSB_TO_B(m, imap->br_startoff);
	xfs_daddr_t		iomap_bn = xfs_fsb_to_db(XFS_I(inode), imap->br_startblock);

	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	bn = (iomap_bn >> (inode->i_blkbits - BBSHIFT)) +
	      ((offset - iomap_offset) >> inode->i_blkbits);
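	/*
	 * Example: with 4096-byte blocks (i_blkbits == 12, BBSHIFT == 9) an
	 * extent starting at daddr 800 gives iomap_bn >> 3 == 100, and an
	 * offset 8192 bytes into the extent adds 2, so b_blocknr becomes 102.
	 */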
	ASSERT(bn || XFS_IS_REALTIME_INODE(XFS_I(inode)));

	bh->b_blocknr = bn;
	set_buffer_mapped(bh);
}
STATIC void
xfs_map_at_offset(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	xfs_map_buffer(inode, bh, imap, offset);
	set_buffer_mapped(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
}
/*
 * Test if a given page is suitable for writing as part of an unwritten
 * or delayed allocate extent.
 */
STATIC int
xfs_is_delayed_page(
	struct page		*page,
	unsigned int		type)
{
	if (PageWriteback(page))
		return 0;

	if (page->mapping && page_has_buffers(page)) {
		struct buffer_head	*bh, *head;
		int			acceptable = 0;

		bh = head = page_buffers(page);
		do {
			if (buffer_unwritten(bh))
				acceptable = (type == IO_UNWRITTEN);
			else if (buffer_delay(bh))
				acceptable = (type == IO_DELALLOC);
			else if (buffer_dirty(bh) && buffer_mapped(bh))
				acceptable = (type == IO_OVERWRITE);
			else
				break;
		} while ((bh = bh->b_this_page) != head);

		if (acceptable)
			return 1;
	}

	return 0;
}
/*
 * Allocate & map buffers for page given the extent map. Write it out.
 * Except for the original page of a writepage, this is called on
 * delalloc/unwritten pages only; for the original page it is possible
 * that the page has no mapping at all.
 */
STATIC int
xfs_convert_page(
	struct inode		*inode,
	struct page		*page,
	loff_t			tindex,
	struct xfs_bmbt_irec	*imap,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc)
{
	struct buffer_head	*bh, *head;
	xfs_off_t		end_offset;
	unsigned long		p_offset;
	unsigned int		type;
	int			len, page_dirty;
	int			count = 0, done = 0, uptodate = 1;
	xfs_off_t		offset = page_offset(page);

	if (page->index != tindex)
		goto fail;
	if (!trylock_page(page))
		goto fail;
	if (PageWriteback(page))
		goto fail_unlock_page;
	if (page->mapping != inode->i_mapping)
		goto fail_unlock_page;
	if (!xfs_is_delayed_page(page, (*ioendp)->io_type))
		goto fail_unlock_page;

	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 *
	 * Derivation:
	 *
	 * End offset is the highest offset that this page should represent.
	 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
	 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
	 * hence give us the correct page_dirty count. On any other page,
	 * it will be zero and in that case we need page_dirty to be the
	 * count of buffers on the page.
	 */
	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
			i_size_read(inode));

	len = 1 << inode->i_blkbits;
	p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
					PAGE_CACHE_SIZE);
	p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
	page_dirty = p_offset / len;
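	/*
	 * Worked example with 4096-byte pages and 512-byte blocks: on the
	 * last page of a 10000-byte file (page index 2), end_offset is 10000,
	 * p_offset is roundup(10000 & 4095, 512) = 2048, and page_dirty is 4,
	 * the number of buffers that lie before EOF. On any earlier page
	 * end_offset is page aligned, so p_offset falls back to
	 * PAGE_CACHE_SIZE and page_dirty counts all 8 buffers.
	 */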
	bh = head = page_buffers(page);
	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh))) {
			done = 1;
			continue;
		}

		if (buffer_unwritten(bh) || buffer_delay(bh) ||
		    buffer_mapped(bh)) {
			if (buffer_unwritten(bh))
				type = IO_UNWRITTEN;
			else if (buffer_delay(bh))
				type = IO_DELALLOC;
			else
				type = IO_OVERWRITE;

			if (!xfs_imap_valid(inode, imap, offset)) {
				done = 1;
				break;
			}

			lock_buffer(bh);
			if (type != IO_OVERWRITE)
				xfs_map_at_offset(inode, bh, imap, offset);
			xfs_add_to_ioend(inode, bh, offset, type,
					 ioendp, done);

			page_dirty--;
			count++;
		} else {
			done = 1;
		}
	} while (offset += len, (bh = bh->b_this_page) != head);

	if (uptodate && bh == head)
		SetPageUptodate(page);

	if (count) {
		if (--wbc->nr_to_write <= 0 &&
		    wbc->sync_mode == WB_SYNC_NONE)
			done = 1;
	}
	xfs_start_page_writeback(page, !page_dirty, count);

	return done;
 fail_unlock_page:
	unlock_page(page);
 fail:
	return 1;
}
/*
 * Convert & write out a cluster of pages in the same extent as defined
 * by mp and following the start page.
 */
STATIC void
xfs_cluster_write(
	struct inode		*inode,
	pgoff_t			tindex,
	struct xfs_bmbt_irec	*imap,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc,
	pgoff_t			tlast)
{
	struct pagevec		pvec;
	int			done = 0, i;

	pagevec_init(&pvec, 0);
	while (!done && tindex <= tlast) {
		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			done = xfs_convert_page(inode, pvec.pages[i], tindex++,
					imap, ioendp, wbc);
			if (done)
				break;
		}

		pagevec_release(&pvec);
		cond_resched();
	}
}
STATIC void
xfs_vm_invalidatepage(
	struct page		*page,
	unsigned long		offset)
{
	trace_xfs_invalidatepage(page->mapping->host, page, offset);
	block_invalidatepage(page, offset);
}
/*
 * If the page has delalloc buffers on it, we need to punch them out before we
 * invalidate the page. If we don't, we leave a stale delalloc mapping on the
 * inode that can trip a BUG() in xfs_get_blocks() later on if a direct IO read
 * is done on that same region - the delalloc extent is returned when none is
 * supposed to be there.
 *
 * We prevent this by truncating away the delalloc regions on the page before
 * invalidating it. Because they are delalloc, we can do this without needing a
 * transaction. Indeed - if we get ENOSPC errors, we have to be able to do this
 * truncation without a transaction as there is no space left for block
 * reservation (typically why we see an ENOSPC in writeback).
 *
 * This is not a performance critical path, so for now just do the punching a
 * buffer head at a time.
 */
STATIC void
xfs_aops_discard_page(
	struct page		*page)
{
	struct inode		*inode = page->mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct buffer_head	*bh, *head;
	loff_t			offset = page_offset(page);

	if (!xfs_is_delayed_page(page, IO_DELALLOC))
		goto out_invalidate;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		goto out_invalidate;

	xfs_alert(ip->i_mount,
		"page discard on page %p, inode 0x%llx, offset %llu.",
			page, ip->i_ino, offset);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	bh = head = page_buffers(page);
	do {
		int		error;
		xfs_fileoff_t	start_fsb;

		if (!buffer_delay(bh))
			goto next_buffer;

		start_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
		error = xfs_bmap_punch_delalloc_range(ip, start_fsb, 1);
		if (error) {
			/* something screwed, just bail */
			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
				xfs_alert(ip->i_mount,
			"page discard unable to remove delalloc mapping.");
			}
			break;
		}
next_buffer:
		offset += 1 << inode->i_blkbits;

	} while ((bh = bh->b_this_page) != head);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out_invalidate:
	xfs_vm_invalidatepage(page, 0);
	return;
}
/*
 * Write out a dirty page.
 *
 * For delalloc space on the page we need to allocate space and flush it.
 * For unwritten space on the page we need to start the conversion to
 * regular allocated space.
 * For any other dirty buffer heads on the page we should flush them.
 */
STATIC int
xfs_vm_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	struct inode		*inode = page->mapping->host;
	struct buffer_head	*bh, *head;
	struct xfs_bmbt_irec	imap;
	xfs_ioend_t		*ioend = NULL, *iohead = NULL;
	loff_t			offset;
	unsigned int		type;
	__uint64_t		end_offset;
	pgoff_t			end_index, last_index;
	ssize_t			len;
	int			err, imap_valid = 0, uptodate = 1;
	int			count = 0;
	int			nonblocking = 0;

	trace_xfs_writepage(inode, page, 0);

	ASSERT(page_has_buffers(page));

	/*
	 * Refuse to write the page out if we are called from reclaim context.
	 *
	 * This avoids stack overflows when called from deeply used stacks in
	 * random callers for direct reclaim or memcg reclaim.  We explicitly
	 * allow reclaim from kswapd as the stack usage there is relatively low.
	 *
	 * This should never happen except in the case of a VM regression so
	 * warn about it.
	 */
	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
			PF_MEMALLOC))
		goto redirty;

	/*
	 * Given that we do not allow direct reclaim to call us, we should
	 * never be called while in a filesystem transaction.
	 */
	if (WARN_ON(current->flags & PF_FSTRANS))
		goto redirty;

	/* Is this page beyond the end of the file? */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_CACHE_SHIFT;
	last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
	if (page->index >= end_index) {
		if ((page->index >= end_index + 1) ||
		    !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
			unlock_page(page);
			return 0;
		}
	}

	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
			offset);
	len = 1 << inode->i_blkbits;

	bh = head = page_buffers(page);
	offset = page_offset(page);
	type = IO_OVERWRITE;

	if (wbc->sync_mode == WB_SYNC_NONE)
		nonblocking = 1;

	do {
		int new_ioend = 0;

		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;

		/*
		 * set_page_dirty dirties all buffers in a page, independent
		 * of their state.  The dirty state however is entirely
		 * meaningless for holes (!mapped && uptodate), so skip
		 * buffers covering holes here.
		 */
		if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
			imap_valid = 0;
			continue;
		}

		if (buffer_unwritten(bh)) {
			if (type != IO_UNWRITTEN) {
				type = IO_UNWRITTEN;
				imap_valid = 0;
			}
		} else if (buffer_delay(bh)) {
			if (type != IO_DELALLOC) {
				type = IO_DELALLOC;
				imap_valid = 0;
			}
		} else if (buffer_uptodate(bh)) {
			if (type != IO_OVERWRITE) {
				type = IO_OVERWRITE;
				imap_valid = 0;
			}
		} else {
			if (PageUptodate(page)) {
				ASSERT(buffer_mapped(bh));
				imap_valid = 0;
			}
			continue;
		}

		if (imap_valid)
			imap_valid = xfs_imap_valid(inode, &imap, offset);
		if (!imap_valid) {
			/*
			 * If we didn't have a valid mapping then we need to
			 * put the new mapping into a separate ioend structure.
			 * This ensures non-contiguous extents always have
			 * separate ioends, which is particularly important
			 * for unwritten extent conversion at I/O completion
			 * time.
			 */
			new_ioend = 1;
			err = xfs_map_blocks(inode, offset, &imap, type,
					     nonblocking);
			if (err)
				goto error;
			imap_valid = xfs_imap_valid(inode, &imap, offset);
		}
		if (imap_valid) {
			lock_buffer(bh);
			if (type != IO_OVERWRITE)
				xfs_map_at_offset(inode, bh, &imap, offset);
			xfs_add_to_ioend(inode, bh, offset, type, &ioend,
					 new_ioend);
			count++;
		}

		if (!iohead)
			iohead = ioend;

	} while (offset += len, ((bh = bh->b_this_page) != head));

	if (uptodate && bh == head)
		SetPageUptodate(page);

	xfs_start_page_writeback(page, 1, count);

	if (ioend && imap_valid) {
		xfs_off_t		end_index;

		end_index = imap.br_startoff + imap.br_blockcount;

		/* to bytes */
		end_index <<= inode->i_blkbits;

		/* to pages */
		end_index = (end_index - 1) >> PAGE_CACHE_SHIFT;

		/* check against file size */
		if (end_index > last_index)
			end_index = last_index;

		xfs_cluster_write(inode, page->index + 1, &imap, &ioend,
				  wbc, end_index);
	}

	if (iohead)
		xfs_submit_ioend(wbc, iohead);

	return 0;

error:
	if (iohead)
		xfs_cancel_ioend(iohead);

	if (err == -EAGAIN)
		goto redirty;

	xfs_aops_discard_page(page);
	ClearPageUptodate(page);
	unlock_page(page);
	return err;

redirty:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}
STATIC int
xfs_vm_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	return generic_writepages(mapping, wbc);
}
/*
 * Called to move a page into cleanable state - and from there
 * to be released. The page should already be clean. We always
 * have buffer heads in this call.
 *
 * Returns 1 if the page is ok to release, 0 otherwise.
 */
STATIC int
xfs_vm_releasepage(
	struct page		*page,
	gfp_t			gfp_mask)
{
	int			delalloc, unwritten;

	trace_xfs_releasepage(page->mapping->host, page, 0);

	xfs_count_page_state(page, &delalloc, &unwritten);

	if (WARN_ON(delalloc))
		return 0;
	if (WARN_ON(unwritten))
		return 0;

	return try_to_free_buffers(page);
}
STATIC int
__xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create,
	int			direct)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			error = 0;
	int			lockmode = 0;
	struct xfs_bmbt_irec	imap;
	int			nimaps = 1;
	xfs_off_t		offset;
	ssize_t			size;
	int			new = 0;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -XFS_ERROR(EIO);

	offset = (xfs_off_t)iblock << inode->i_blkbits;
	ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
	size = bh_result->b_size;

	if (!create && direct && offset >= i_size_read(inode))
		return 0;

	if (create) {
		lockmode = XFS_ILOCK_EXCL;
		xfs_ilock(ip, lockmode);
	} else {
		lockmode = xfs_ilock_map_shared(ip);
	}

	ASSERT(offset <= mp->m_maxioffset);
	if (offset + size > mp->m_maxioffset)
		size = mp->m_maxioffset - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);

	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
				&imap, &nimaps, XFS_BMAPI_ENTIRE);
	if (error)
		goto out_unlock;

	if (create &&
	    (!nimaps ||
	     (imap.br_startblock == HOLESTARTBLOCK ||
	      imap.br_startblock == DELAYSTARTBLOCK))) {
		if (direct) {
			error = xfs_iomap_write_direct(ip, offset, size,
						       &imap, nimaps);
		} else {
			error = xfs_iomap_write_delay(ip, offset, size, &imap);
		}
		if (error)
			goto out_unlock;

		trace_xfs_get_blocks_alloc(ip, offset, size, 0, &imap);
	} else if (nimaps) {
		trace_xfs_get_blocks_found(ip, offset, size, 0, &imap);
	} else {
		trace_xfs_get_blocks_notfound(ip, offset, size);
		goto out_unlock;
	}
	xfs_iunlock(ip, lockmode);

	if (imap.br_startblock != HOLESTARTBLOCK &&
	    imap.br_startblock != DELAYSTARTBLOCK) {
		/*
		 * For unwritten extents do not report a disk address on
		 * the read case (treat as if we're reading into a hole).
		 */
		if (create || !ISUNWRITTEN(&imap))
			xfs_map_buffer(inode, bh_result, &imap, offset);
		if (create && ISUNWRITTEN(&imap)) {
			if (direct)
				bh_result->b_private = inode;
			set_buffer_unwritten(bh_result);
		}
	}

	/*
	 * If this is a realtime file, data may be on a different device
	 * to that pointed to from the buffer_head b_bdev currently.
	 */
	bh_result->b_bdev = xfs_find_bdev_for_inode(inode);

	/*
	 * If we previously allocated a block out beyond eof and we are now
	 * coming back to use it then we will need to flag it as new even if it
	 * has a disk address.
	 *
	 * With sub-block writes into unwritten extents we also need to mark
	 * the buffer as new so that the unwritten parts of the buffer get
	 * correctly zeroed.
	 */
	if (create &&
	    ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
	     (offset >= i_size_read(inode)) ||
	     (new || ISUNWRITTEN(&imap))))
		set_buffer_new(bh_result);

	if (imap.br_startblock == DELAYSTARTBLOCK) {
		BUG_ON(direct);
		if (create) {
			set_buffer_uptodate(bh_result);
			set_buffer_mapped(bh_result);
			set_buffer_delay(bh_result);
		}
	}

	/*
	 * If this is O_DIRECT or the mpage code calling, tell them how large
	 * the mapping is, so that we can avoid repeated get_blocks calls.
	 */
	if (direct || size > (1 << inode->i_blkbits)) {
		xfs_off_t		mapping_size;

		mapping_size = imap.br_startoff + imap.br_blockcount - iblock;
		mapping_size <<= inode->i_blkbits;
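		/*
		 * Example: with 4096-byte blocks and an extent mapping file
		 * blocks [100, 110), a request at iblock 104 leaves 6 blocks
		 * to the end of the extent, so mapping_size starts out as
		 * 6 << 12 = 24576 bytes before being clamped below.
		 */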
		ASSERT(mapping_size > 0);
		if (mapping_size > size)
			mapping_size = size;
		if (mapping_size > LONG_MAX)
			mapping_size = LONG_MAX;

		bh_result->b_size = mapping_size;
	}

	return 0;

out_unlock:
	xfs_iunlock(ip, lockmode);
	return -error;
}
int
xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock, bh_result, create, 0);
}

STATIC int
xfs_get_blocks_direct(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock, bh_result, create, 1);
}
/*
 * Complete a direct I/O write request.
 *
 * If the private argument is non-NULL __xfs_get_blocks signals us that we
 * need to issue a transaction to convert the range from unwritten to written
 * extents.  In case this is regular synchronous I/O we just call xfs_end_io
 * to do this and we are done.  But in case this was a successful AIO
 * request this handler is called from interrupt context, from which we
 * can't start transactions.  In that case offload the I/O completion to
 * the workqueues we also use for buffered I/O completion.
 */
STATIC void
xfs_end_io_direct_write(
	struct kiocb		*iocb,
	loff_t			offset,
	ssize_t			size,
	void			*private,
	int			ret,
	bool			is_async)
{
	struct xfs_ioend	*ioend = iocb->private;

	/*
	 * While the generic direct I/O code updates the inode size, it does
	 * so only after the end_io handler is called, which means our
	 * end_io handler thinks the on-disk size is outside the in-core
	 * size.  To prevent this just update it a little bit earlier here.
	 */
	if (offset + size > i_size_read(ioend->io_inode))
		i_size_write(ioend->io_inode, offset + size);

	/*
	 * blockdev_direct_IO can return an error even after the I/O
	 * completion handler was called.  Thus we need to protect
	 * against double-freeing.
	 */
	iocb->private = NULL;

	ioend->io_offset = offset;
	ioend->io_size = size;
	ioend->io_iocb = iocb;
	ioend->io_result = ret;
	if (private && size > 0)
		ioend->io_type = IO_UNWRITTEN;

	if (is_async) {
		ioend->io_isasync = 1;
		xfs_finish_ioend(ioend);
	} else {
		xfs_finish_ioend_sync(ioend);
	}
}
STATIC ssize_t
xfs_vm_direct_IO(
	int			rw,
	struct kiocb		*iocb,
	const struct iovec	*iov,
	loff_t			offset,
	unsigned long		nr_segs)
{
	struct inode		*inode = iocb->ki_filp->f_mapping->host;
	struct block_device	*bdev = xfs_find_bdev_for_inode(inode);
	ssize_t			ret;

	if (rw & WRITE) {
		iocb->private = xfs_alloc_ioend(inode, IO_DIRECT);

		ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
					    offset, nr_segs,
					    xfs_get_blocks_direct,
					    xfs_end_io_direct_write, NULL, 0);
		if (ret != -EIOCBQUEUED && iocb->private)
			xfs_destroy_ioend(iocb->private);
	} else {
		ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
					    offset, nr_segs,
					    xfs_get_blocks_direct,
					    NULL, NULL, 0);
	}

	return ret;
}
STATIC void
xfs_vm_write_failed(
	struct address_space	*mapping,
	loff_t			to)
{
	struct inode		*inode = mapping->host;

	if (to > inode->i_size) {
		/*
		 * Punch out the delalloc blocks we have already allocated.
		 *
		 * Don't bother with xfs_setattr given that nothing can have
		 * made it to disk yet as the page is still locked at this
		 * point.
		 */
		struct xfs_inode	*ip = XFS_I(inode);
		xfs_fileoff_t		start_fsb;
		xfs_fileoff_t		end_fsb;
		int			error;

		truncate_pagecache(inode, to, inode->i_size);

		/*
		 * Check if there are any blocks that are outside of i_size
		 * that need to be trimmed back.
		 */
		start_fsb = XFS_B_TO_FSB(ip->i_mount, inode->i_size) + 1;
		end_fsb = XFS_B_TO_FSB(ip->i_mount, to);
		if (end_fsb <= start_fsb)
			return;

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
							end_fsb - start_fsb);
		if (error) {
			/* something screwed, just bail */
			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
				xfs_alert(ip->i_mount,
			"xfs_vm_write_failed: unable to clean up ino %lld",
						ip->i_ino);
			}
		}
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	}
}
STATIC int
xfs_vm_write_begin(
	struct file		*file,
	struct address_space	*mapping,
	loff_t			pos,
	unsigned		len,
	unsigned		flags,
	struct page		**pagep,
	void			**fsdata)
{
	int			ret;

	ret = block_write_begin(mapping, pos, len, flags | AOP_FLAG_NOFS,
				pagep, xfs_get_blocks);
	if (unlikely(ret))
		xfs_vm_write_failed(mapping, pos + len);
	return ret;
}
STATIC int
xfs_vm_write_end(
	struct file		*file,
	struct address_space	*mapping,
	loff_t			pos,
	unsigned		len,
	unsigned		copied,
	struct page		*page,
	void			*fsdata)
{
	int			ret;

	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
	if (unlikely(ret < len))
		xfs_vm_write_failed(mapping, pos + len);
	return ret;
}
STATIC sector_t
xfs_vm_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct inode		*inode = (struct inode *)mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);

	trace_xfs_vm_bmap(XFS_I(inode));
	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	xfs_flush_pages(ip, (xfs_off_t)0, -1, 0, FI_REMAPF);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return generic_block_bmap(mapping, block, xfs_get_blocks);
}
STATIC int
xfs_vm_readpage(
	struct file		*unused,
	struct page		*page)
{
	return mpage_readpage(page, xfs_get_blocks);
}
STATIC int
xfs_vm_readpages(
	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	unsigned		nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
}
const struct address_space_operations xfs_address_space_operations = {
	.readpage		= xfs_vm_readpage,
	.readpages		= xfs_vm_readpages,
	.writepage		= xfs_vm_writepage,
	.writepages		= xfs_vm_writepages,
	.releasepage		= xfs_vm_releasepage,
	.invalidatepage		= xfs_vm_invalidatepage,
	.write_begin		= xfs_vm_write_begin,
	.write_end		= xfs_vm_write_end,
	.bmap			= xfs_vm_bmap,
	.direct_IO		= xfs_vm_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};