/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs_trans.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_iomap.h"
#include "xfs_vnodeops.h"
#include "xfs_trace.h"
#include "xfs_bmap.h"
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>

void
xfs_count_page_state(
	struct page		*page,
	int			*delalloc,
	int			*unwritten)
{
	struct buffer_head	*bh, *head;

	*delalloc = *unwritten = 0;

	bh = head = page_buffers(page);
	do {
		if (buffer_unwritten(bh))
			(*unwritten) = 1;
		else if (buffer_delay(bh))
			(*delalloc) = 1;
	} while ((bh = bh->b_this_page) != head);
}

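/*
 * Return the block device backing this inode's data: the realtime device
 * for realtime inodes, the data device otherwise.
 */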
STATIC struct block_device *
xfs_find_bdev_for_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_bdev;
	return mp->m_ddev_targp->bt_bdev;
}

/*
 * We're now finished for good with this ioend structure.
 * Update the page state via the associated buffer_heads,
 * release holds on the inode and bio, and finally free
 * up memory.  Do not use the ioend after this.
 */
STATIC void
xfs_destroy_ioend(
	xfs_ioend_t		*ioend)
{
	struct buffer_head	*bh, *next;

	for (bh = ioend->io_buffer_head; bh; bh = next) {
		next = bh->b_private;
		bh->b_end_io(bh, !ioend->io_error);
	}

	if (ioend->io_isasync) {
		aio_complete(ioend->io_iocb, ioend->io_error ?
				ioend->io_error : ioend->io_result, 0);
		inode_dio_done(ioend->io_inode);
	}

	mempool_free(ioend, xfs_ioend_pool);
}

/*
 * Fast and loose check if this write could update the on-disk inode size.
 */
static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
{
	return ioend->io_offset + ioend->io_size >
		XFS_I(ioend->io_inode)->i_d.di_size;
}

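/*
 * Allocate and reserve a transaction up front for a later on-disk inode
 * size update.  It is stashed in ioend->io_append_trans and handed over
 * to the I/O completion side, so completion never has to block on a log
 * reservation.
 */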
STATIC int
xfs_setfilesize_trans_alloc(
	struct xfs_ioend	*ioend)
{
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;
	struct xfs_trans	*tp;
	int			error;

	tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);

	error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	ioend->io_append_trans = tp;

	/*
	 * We hand off the transaction to the completion thread now, so
	 * clear the flag here.
	 */
	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
	return 0;
}

/*
 * Update on-disk file size now that data has been written to disk.
 */
STATIC int
xfs_setfilesize(
	struct xfs_ioend	*ioend)
{
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	struct xfs_trans	*tp = ioend->io_append_trans;
	xfs_fsize_t		isize;

	/*
	 * The transaction was allocated in the I/O submission thread,
	 * thus we need to mark ourselves as being in a transaction
	 * manually.
	 */
	current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	isize = xfs_new_eof(ip, ioend->io_offset + ioend->io_size);
	if (!isize) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_trans_cancel(tp, 0);
		return 0;
	}

	trace_xfs_setfilesize(ip, ioend->io_offset, ioend->io_size);

	ip->i_d.di_size = isize;
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	return xfs_trans_commit(tp, 0);
}

/*
 * Schedule IO completion handling on the final put of an ioend.
 *
 * If there is no work to do we might as well call it a day and free the
 * ioend right now.
 */
STATIC void
xfs_finish_ioend(
	struct xfs_ioend	*ioend)
{
	if (atomic_dec_and_test(&ioend->io_remaining)) {
		struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;

		if (ioend->io_type == IO_UNWRITTEN)
			queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
		else if (ioend->io_append_trans)
			queue_work(mp->m_data_workqueue, &ioend->io_work);
		else
			xfs_destroy_ioend(ioend);
	}
}

/*
 * IO write completion.
 */
STATIC void
xfs_end_io(
	struct work_struct	*work)
{
	xfs_ioend_t		*ioend = container_of(work, xfs_ioend_t, io_work);
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	int			error = 0;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		ioend->io_error = -EIO;
		goto done;
	}
	if (ioend->io_error)
		goto done;

	/*
	 * For unwritten extents we need to issue transactions to convert a
	 * range to normal written extents after the data I/O has finished.
	 */
	if (ioend->io_type == IO_UNWRITTEN) {
		/*
		 * For buffered I/O we never preallocate a transaction when
		 * doing the unwritten extent conversion, but for direct I/O
		 * we do not know if we are converting an unwritten extent
		 * or not at the point where we preallocate the transaction.
		 */
		if (ioend->io_append_trans) {
			ASSERT(ioend->io_isdirect);

			current_set_flags_nested(
				&ioend->io_append_trans->t_pflags, PF_FSTRANS);
			xfs_trans_cancel(ioend->io_append_trans, 0);
		}

		error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
						  ioend->io_size);
		if (error) {
			ioend->io_error = -error;
			goto done;
		}
	} else if (ioend->io_append_trans) {
		error = xfs_setfilesize(ioend);
		if (error)
			ioend->io_error = -error;
	} else {
		ASSERT(!xfs_ioend_is_append(ioend));
	}

done:
	xfs_destroy_ioend(ioend);
}

/*
 * Call IO completion handling in caller context on the final put of an ioend.
 */
STATIC void
xfs_finish_ioend_sync(
	struct xfs_ioend	*ioend)
{
	if (atomic_dec_and_test(&ioend->io_remaining))
		xfs_end_io(&ioend->io_work);
}

/*
 * Allocate and initialise an IO completion structure.
 * We need to track unwritten extent write completion here initially.
 * We'll need to extend this for updating the ondisk inode size later
 * (vs. incore size).
 */
STATIC xfs_ioend_t *
xfs_alloc_ioend(
	struct inode		*inode,
	unsigned int		type)
{
	xfs_ioend_t		*ioend;

	ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);

	/*
	 * Set the count to 1 initially, which will prevent an I/O
	 * completion callback from happening before we have started
	 * all the I/O from calling the completion routine too early.
	 */
	atomic_set(&ioend->io_remaining, 1);
	ioend->io_isasync = 0;
	ioend->io_isdirect = 0;
	ioend->io_error = 0;
	ioend->io_list = NULL;
	ioend->io_type = type;
	ioend->io_inode = inode;
	ioend->io_buffer_head = NULL;
	ioend->io_buffer_tail = NULL;
	ioend->io_offset = 0;
	ioend->io_size = 0;
	ioend->io_iocb = NULL;
	ioend->io_result = 0;
	ioend->io_append_trans = NULL;

	INIT_WORK(&ioend->io_work, xfs_end_io);
	return ioend;
}

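/*
 * Look up (and for delalloc writeback, allocate) the extent mapping that
 * backs the range starting at 'offset'.  Unwritten extents are left as-is
 * here and converted at I/O completion time instead.
 */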
STATIC int
xfs_map_blocks(
	struct inode		*inode,
	loff_t			offset,
	struct xfs_bmbt_irec	*imap,
	int			type,
	int			nonblocking)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			count = 1 << inode->i_blkbits;
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			error = 0;
	int			bmapi_flags = XFS_BMAPI_ENTIRE;
	int			nimaps = 1;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -XFS_ERROR(EIO);

	if (type == IO_UNWRITTEN)
		bmapi_flags |= XFS_BMAPI_IGSTATE;

	if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
		if (nonblocking)
			return -XFS_ERROR(EAGAIN);
		xfs_ilock(ip, XFS_ILOCK_SHARED);
	}

	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       (ip->i_df.if_flags & XFS_IFEXTENTS));
	ASSERT(offset <= mp->m_maxioffset);

	if (offset + count > mp->m_maxioffset)
		count = mp->m_maxioffset - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
				imap, &nimaps, bmapi_flags);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (error)
		return -XFS_ERROR(error);

	if (type == IO_DELALLOC &&
	    (!nimaps || isnullstartblock(imap->br_startblock))) {
		error = xfs_iomap_write_allocate(ip, offset, count, imap);
		if (!error)
			trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);
		return -XFS_ERROR(error);
	}

	if (type == IO_UNWRITTEN) {
		ASSERT(imap->br_startblock != HOLESTARTBLOCK);
		ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
	}
	if (nimaps)
		trace_xfs_map_blocks_found(ip, offset, count, type, imap);
	return 0;
}

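/*
 * Return true if 'offset' still falls inside the cached extent mapping.
 */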
STATIC int
xfs_imap_valid(
	struct inode		*inode,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	offset >>= inode->i_blkbits;

	return offset >= imap->br_startoff &&
		offset < imap->br_startoff + imap->br_blockcount;
}

/*
 * BIO completion handler for buffered IO.
 */
STATIC void
xfs_end_bio(
	struct bio		*bio,
	int			error)
{
	xfs_ioend_t		*ioend = bio->bi_private;

	ASSERT(atomic_read(&bio->bi_cnt) >= 1);
	ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;

	/* Toss bio and pass work off to an xfsdatad thread */
	bio->bi_private = NULL;
	bio->bi_end_io = NULL;
	bio_put(bio);

	xfs_finish_ioend(ioend);
}

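/*
 * Submit a single bio on behalf of an ioend.  io_remaining is bumped so
 * the ioend stays around until xfs_end_bio drops the extra reference.
 */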
STATIC void
xfs_submit_ioend_bio(
	struct writeback_control *wbc,
	xfs_ioend_t		*ioend,
	struct bio		*bio)
{
	atomic_inc(&ioend->io_remaining);
	bio->bi_private = ioend;
	bio->bi_end_io = xfs_end_bio;
	submit_bio(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE, bio);
}

STATIC struct bio *
xfs_alloc_ioend_bio(
	struct buffer_head	*bh)
{
	int			nvecs = bio_get_nr_vecs(bh->b_bdev);
	struct bio		*bio = bio_alloc(GFP_NOIO, nvecs);

	ASSERT(bio->bi_private == NULL);
	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	return bio;
}

STATIC void
xfs_start_buffer_writeback(
	struct buffer_head	*bh)
{
	ASSERT(buffer_mapped(bh));
	ASSERT(buffer_locked(bh));
	ASSERT(!buffer_delay(bh));
	ASSERT(!buffer_unwritten(bh));

	mark_buffer_async_write(bh);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
}

STATIC void
xfs_start_page_writeback(
	struct page		*page,
	int			clear_dirty,
	int			buffers)
{
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));
	if (clear_dirty)
		clear_page_dirty_for_io(page);
	set_page_writeback(page);
	unlock_page(page);
	/* If no buffers on the page are to be written, finish it here */
	if (!buffers)
		end_page_writeback(page);
}

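/*
 * Thin wrapper to add a whole buffer_head to a bio at the buffer's
 * offset within its page.
 */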
static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
	return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}

/*
 * Submit all of the bios for all of the ioends we have saved up, covering the
 * initial writepage page and also any probed pages.
 *
 * Because we may have multiple ioends spanning a page, we need to start
 * writeback on all the buffers before we submit them for I/O.  If we mark the
 * buffers as we go, then we can end up with a page that only has some buffers
 * marked async write, and I/O completion on those can occur before we mark
 * the other buffers async write.
 *
 * The end result of this is that we trip a bug in end_page_writeback() because
 * we call it twice for the one page as the code in end_buffer_async_write()
 * assumes that all buffers on the page are started at the same time.
 *
 * The fix is two passes across the ioend list - one to start writeback on the
 * buffer_heads, and then submit them for I/O on the second pass.
 */
STATIC void
xfs_submit_ioend(
	struct writeback_control *wbc,
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*head = ioend;
	xfs_ioend_t		*next;
	struct buffer_head	*bh;
	struct bio		*bio;
	sector_t		lastblock = 0;

	/* Pass 1 - start writeback */
	do {
		next = ioend->io_list;
		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private)
			xfs_start_buffer_writeback(bh);
	} while ((ioend = next) != NULL);

	/* Pass 2 - submit I/O */
	ioend = head;
	do {
		next = ioend->io_list;
		bio = NULL;

		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {

			if (!bio) {
 retry:
				bio = xfs_alloc_ioend_bio(bh);
			} else if (bh->b_blocknr != lastblock + 1) {
				xfs_submit_ioend_bio(wbc, ioend, bio);
				goto retry;
			}

			if (bio_add_buffer(bio, bh) != bh->b_size) {
				xfs_submit_ioend_bio(wbc, ioend, bio);
				goto retry;
			}

			lastblock = bh->b_blocknr;
		}
		if (bio)
			xfs_submit_ioend_bio(wbc, ioend, bio);
		xfs_finish_ioend(ioend);
	} while ((ioend = next) != NULL);
}

/*
 * Cancel submission of all buffer_heads so far in this endio.
 * Toss the endio too.  Only ever called for the initial page
 * in a writepage request, so only ever one page.
 */
STATIC void
xfs_cancel_ioend(
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*next;
	struct buffer_head	*bh, *next_bh;

	do {
		next = ioend->io_list;
		bh = ioend->io_buffer_head;
		do {
			next_bh = bh->b_private;
			clear_buffer_async_write(bh);
			unlock_buffer(bh);
		} while ((bh = next_bh) != NULL);

		mempool_free(ioend, xfs_ioend_pool);
	} while ((ioend = next) != NULL);
}

/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we
 * can, otherwise we finish off any current ioend and start another.
 * Return true if we've finished the given ioend.
 */
STATIC void
xfs_add_to_ioend(
	struct inode		*inode,
	struct buffer_head	*bh,
	xfs_off_t		offset,
	unsigned int		type,
	xfs_ioend_t		**result,
	int			need_ioend)
{
	xfs_ioend_t		*ioend = *result;

	if (!ioend || need_ioend || type != ioend->io_type) {
		xfs_ioend_t	*previous = *result;

		ioend = xfs_alloc_ioend(inode, type);
		ioend->io_offset = offset;
		ioend->io_buffer_head = bh;
		ioend->io_buffer_tail = bh;
		if (previous)
			previous->io_list = ioend;
		*result = ioend;
	} else {
		ioend->io_buffer_tail->b_private = bh;
		ioend->io_buffer_tail = bh;
	}

	bh->b_private = NULL;
	ioend->io_size += bh->b_size;
}

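/*
 * Translate the file offset covered by 'bh' into a disk block number
 * using the extent mapping in 'imap' and mark the buffer mapped.
 */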
STATIC void
xfs_map_buffer(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	sector_t		bn;
	struct xfs_mount	*m = XFS_I(inode)->i_mount;
	xfs_off_t		iomap_offset = XFS_FSB_TO_B(m, imap->br_startoff);
	xfs_daddr_t		iomap_bn = xfs_fsb_to_db(XFS_I(inode), imap->br_startblock);

	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	bn = (iomap_bn >> (inode->i_blkbits - BBSHIFT)) +
	      ((offset - iomap_offset) >> inode->i_blkbits);

	ASSERT(bn || XFS_IS_REALTIME_INODE(XFS_I(inode)));

	bh->b_blocknr = bn;
	set_buffer_mapped(bh);
}

STATIC void
xfs_map_at_offset(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	xfs_map_buffer(inode, bh, imap, offset);
	set_buffer_mapped(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
}

/*
 * Test if a given page is suitable for writing as part of an unwritten
 * or delayed allocate extent.
 */
STATIC int
xfs_check_page_type(
	struct page		*page,
	unsigned int		type)
{
	if (PageWriteback(page))
		return 0;

	if (page->mapping && page_has_buffers(page)) {
		struct buffer_head	*bh, *head;
		int			acceptable = 0;

		bh = head = page_buffers(page);
		do {
			if (buffer_unwritten(bh))
				acceptable += (type == IO_UNWRITTEN);
			else if (buffer_delay(bh))
				acceptable += (type == IO_DELALLOC);
			else if (buffer_dirty(bh) && buffer_mapped(bh))
				acceptable += (type == IO_OVERWRITE);
			else
				break;
		} while ((bh = bh->b_this_page) != head);

		if (acceptable)
			return 1;
	}

	return 0;
}

/*
 * Allocate & map buffers for page given the extent map.  Write it out.
 * Except for the original page of a writepage, this is called on
 * delalloc/unwritten pages only; for the original page it is possible
 * that the page has no mapping at all.
 */
STATIC int
xfs_convert_page(
	struct inode		*inode,
	struct page		*page,
	loff_t			tindex,
	struct xfs_bmbt_irec	*imap,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc)
{
	struct buffer_head	*bh, *head;
	xfs_off_t		end_offset;
	unsigned long		p_offset;
	unsigned int		type;
	int			len, page_dirty;
	int			count = 0, done = 0, uptodate = 1;
	xfs_off_t		offset = page_offset(page);

	if (page->index != tindex)
		goto fail;
	if (!trylock_page(page))
		goto fail;
	if (PageWriteback(page))
		goto fail_unlock_page;
	if (page->mapping != inode->i_mapping)
		goto fail_unlock_page;
	if (!xfs_check_page_type(page, (*ioendp)->io_type))
		goto fail_unlock_page;

	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 *
	 * Derivation:
	 *
	 * End offset is the highest offset that this page should represent.
	 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
	 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
	 * hence give us the correct page_dirty count. On any other page,
	 * it will be zero and in that case we need page_dirty to be the
	 * count of buffers on the page.
	 */
	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
			i_size_read(inode));

	len = 1 << inode->i_blkbits;
	p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
					PAGE_CACHE_SIZE);
	p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
	page_dirty = p_offset / len;

	bh = head = page_buffers(page);
	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh))) {
			done = 1;
			continue;
		}

		if (buffer_unwritten(bh) || buffer_delay(bh) ||
		    buffer_mapped(bh)) {
			if (buffer_unwritten(bh))
				type = IO_UNWRITTEN;
			else if (buffer_delay(bh))
				type = IO_DELALLOC;
			else
				type = IO_OVERWRITE;

			if (!xfs_imap_valid(inode, imap, offset)) {
				done = 1;
				continue;
			}

			lock_buffer(bh);
			if (type != IO_OVERWRITE)
				xfs_map_at_offset(inode, bh, imap, offset);
			xfs_add_to_ioend(inode, bh, offset, type,
					 ioendp, done);

			page_dirty--;
			count++;
		} else {
			done = 1;
		}
	} while (offset += len, (bh = bh->b_this_page) != head);

	if (uptodate && bh == head)
		SetPageUptodate(page);

	if (count) {
		if (--wbc->nr_to_write <= 0 &&
		    wbc->sync_mode == WB_SYNC_NONE)
			done = 1;
	}
	xfs_start_page_writeback(page, !page_dirty, count);

	return done;
 fail_unlock_page:
	unlock_page(page);
 fail:
	return 1;
}

/*
 * Convert & write out a cluster of pages in the same extent as defined
 * by mp and following the start page.
 */
STATIC void
xfs_cluster_write(
	struct inode		*inode,
	pgoff_t			tindex,
	struct xfs_bmbt_irec	*imap,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc,
	pgoff_t			tlast)
{
	struct pagevec		pvec;
	int			done = 0, i;

	pagevec_init(&pvec, 0);
	while (!done && tindex <= tlast) {
		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			done = xfs_convert_page(inode, pvec.pages[i], tindex++,
					imap, ioendp, wbc);
			if (done)
				break;
		}

		pagevec_release(&pvec);
	}
}

STATIC void
xfs_vm_invalidatepage(
	struct page		*page,
	unsigned long		offset)
{
	trace_xfs_invalidatepage(page->mapping->host, page, offset);
	block_invalidatepage(page, offset);
}

/*
 * If the page has delalloc buffers on it, we need to punch them out before we
 * invalidate the page.  If we don't, we leave a stale delalloc mapping on the
 * inode that can trip a BUG() in xfs_get_blocks() later on if a direct IO read
 * is done on that same region - the delalloc extent is returned when none is
 * supposed to be there.
 *
 * We prevent this by truncating away the delalloc regions on the page before
 * invalidating it. Because they are delalloc, we can do this without needing a
 * transaction. Indeed - if we get ENOSPC errors, we have to be able to do this
 * truncation without a transaction as there is no space left for block
 * reservation (typically why we see an ENOSPC in writeback).
 *
 * This is not a performance critical path, so for now just do the punching a
 * buffer head at a time.
 */
STATIC void
xfs_aops_discard_page(
	struct page		*page)
{
	struct inode		*inode = page->mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct buffer_head	*bh, *head;
	loff_t			offset = page_offset(page);

	if (!xfs_check_page_type(page, IO_DELALLOC))
		goto out_invalidate;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		goto out_invalidate;

	xfs_alert(ip->i_mount,
		"page discard on page %p, inode 0x%llx, offset %llu.",
			page, ip->i_ino, offset);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	bh = head = page_buffers(page);
	do {
		int		error;
		xfs_fileoff_t	start_fsb;

		if (!buffer_delay(bh))
			goto next_buffer;

		start_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
		error = xfs_bmap_punch_delalloc_range(ip, start_fsb, 1);
		if (error) {
			/* something screwed, just bail */
			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
				xfs_alert(ip->i_mount,
			"page discard unable to remove delalloc mapping.");
			}
			break;
		}
next_buffer:
		offset += 1 << inode->i_blkbits;

	} while ((bh = bh->b_this_page) != head);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out_invalidate:
	xfs_vm_invalidatepage(page, 0);
	return;
}

/*
 * Write out a dirty page.
 *
 * For delalloc space on the page we need to allocate space and flush it.
 * For unwritten space on the page we need to start the conversion to
 * regular allocated space.
 * For any other dirty buffer heads on the page we should flush them.
 */
STATIC int
xfs_vm_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	struct inode		*inode = page->mapping->host;
	struct buffer_head	*bh, *head;
	struct xfs_bmbt_irec	imap;
	xfs_ioend_t		*ioend = NULL, *iohead = NULL;
	loff_t			offset;
	unsigned int		type;
	__uint64_t		end_offset;
	pgoff_t			end_index, last_index;
	ssize_t			len;
	int			err, imap_valid = 0, uptodate = 1;
	int			count = 0;
	int			nonblocking = 0;

	trace_xfs_writepage(inode, page, 0);

	ASSERT(page_has_buffers(page));

	/*
	 * Refuse to write the page out if we are called from reclaim context.
	 *
	 * This avoids stack overflows when called from deeply used stacks in
	 * random callers for direct reclaim or memcg reclaim.  We explicitly
	 * allow reclaim from kswapd as the stack usage there is relatively low.
	 *
	 * This should never happen except in the case of a VM regression so
	 * warn about it.
	 */
	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
			PF_MEMALLOC))
		goto redirty;

	/*
	 * Given that we do not allow direct reclaim to call us, we should
	 * never be called while in a filesystem transaction.
	 */
	if (WARN_ON(current->flags & PF_FSTRANS))
		goto redirty;

	/* Is this page beyond the end of the file? */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_CACHE_SHIFT;
	last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
	if (page->index >= end_index) {
		if ((page->index >= end_index + 1) ||
		    !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
			unlock_page(page);
			return 0;
		}
	}

	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
			offset);
	len = 1 << inode->i_blkbits;

	bh = head = page_buffers(page);
	offset = page_offset(page);
	type = IO_OVERWRITE;

	if (wbc->sync_mode == WB_SYNC_NONE)
		nonblocking = 1;

	do {
		int new_ioend = 0;

		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;

		/*
		 * set_page_dirty dirties all buffers in a page, independent
		 * of their state.  The dirty state however is entirely
		 * meaningless for holes (!mapped && uptodate), so skip
		 * buffers covering holes here.
		 */
		if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
			imap_valid = 0;
			continue;
		}

		if (buffer_unwritten(bh)) {
			if (type != IO_UNWRITTEN) {
				type = IO_UNWRITTEN;
				imap_valid = 0;
			}
		} else if (buffer_delay(bh)) {
			if (type != IO_DELALLOC) {
				type = IO_DELALLOC;
				imap_valid = 0;
			}
		} else if (buffer_uptodate(bh)) {
			if (type != IO_OVERWRITE) {
				type = IO_OVERWRITE;
				imap_valid = 0;
			}
		} else {
			if (PageUptodate(page))
				ASSERT(buffer_mapped(bh));
			/*
			 * This buffer is not uptodate and will not be
			 * written to disk.  Ensure that we will put any
			 * subsequent writeable buffers into a new
			 * ioend.
			 */
			imap_valid = 0;
			continue;
		}

		if (imap_valid)
			imap_valid = xfs_imap_valid(inode, &imap, offset);
		if (!imap_valid) {
			/*
			 * If we didn't have a valid mapping then we need to
			 * put the new mapping into a separate ioend structure.
			 * This ensures non-contiguous extents always have
			 * separate ioends, which is particularly important
			 * for unwritten extent conversion at I/O completion
			 * time.
			 */
			new_ioend = 1;
			err = xfs_map_blocks(inode, offset, &imap, type,
					     nonblocking);
			if (err)
				goto error;
			imap_valid = xfs_imap_valid(inode, &imap, offset);
		}
		if (imap_valid) {
			lock_buffer(bh);
			if (type != IO_OVERWRITE)
				xfs_map_at_offset(inode, bh, &imap, offset);
			xfs_add_to_ioend(inode, bh, offset, type, &ioend,
					 new_ioend);
			count++;
		}

		if (!iohead)
			iohead = ioend;

	} while (offset += len, ((bh = bh->b_this_page) != head));

	if (uptodate && bh == head)
		SetPageUptodate(page);

	xfs_start_page_writeback(page, 1, count);

	if (ioend && imap_valid) {
		xfs_off_t		end_index;

		end_index = imap.br_startoff + imap.br_blockcount;

		/* to bytes */
		end_index <<= inode->i_blkbits;

		/* to pages */
		end_index = (end_index - 1) >> PAGE_CACHE_SHIFT;

		/* check against file size */
		if (end_index > last_index)
			end_index = last_index;

		xfs_cluster_write(inode, page->index + 1, &imap, &ioend,
				  wbc, end_index);
	}

	if (iohead) {
		/*
		 * Reserve log space if we might write beyond the on-disk
		 * inode size.
		 */
		if (ioend->io_type != IO_UNWRITTEN &&
		    xfs_ioend_is_append(ioend)) {
			err = xfs_setfilesize_trans_alloc(ioend);
			if (err)
				goto error;
		}

		xfs_submit_ioend(wbc, iohead);
	}

	return 0;

error:
	if (iohead)
		xfs_cancel_ioend(iohead);

	if (err == -EAGAIN)
		goto redirty;

	xfs_aops_discard_page(page);
	ClearPageUptodate(page);
	unlock_page(page);
	return err;

redirty:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}

STATIC int
xfs_vm_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	return generic_writepages(mapping, wbc);
}

/*
 * Called to move a page into cleanable state - and from there
 * to be released.  The page should already be clean.  We always
 * have buffer heads in this call.
 *
 * Returns 1 if the page is ok to release, 0 otherwise.
 */
STATIC int
xfs_vm_releasepage(
	struct page		*page,
	gfp_t			gfp_mask)
{
	int			delalloc, unwritten;

	trace_xfs_releasepage(page->mapping->host, page, 0);

	xfs_count_page_state(page, &delalloc, &unwritten);

	if (WARN_ON(delalloc))
		return 0;
	if (WARN_ON(unwritten))
		return 0;

	return try_to_free_buffers(page);
}

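/*
 * Common get_blocks implementation for buffered and direct I/O: map
 * 'iblock' to a disk block, allocating delalloc or real blocks for
 * writes as needed.  'direct' selects the direct I/O locking and
 * allocation behaviour.
 */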
STATIC int
__xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create,
	int			direct)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			error = 0;
	int			lockmode = 0;
	struct xfs_bmbt_irec	imap;
	int			nimaps = 1;
	xfs_off_t		offset;
	ssize_t			size;
	int			new = 0;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -XFS_ERROR(EIO);

	offset = (xfs_off_t)iblock << inode->i_blkbits;
	ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
	size = bh_result->b_size;

	if (!create && direct && offset >= i_size_read(inode))
		return 0;

	/*
	 * Direct I/O is usually done on preallocated files, so try getting
	 * a block mapping without an exclusive lock first.  For buffered
	 * writes we already have the exclusive iolock anyway, so avoiding
	 * a lock roundtrip here by taking the ilock exclusive from the
	 * beginning is a useful micro optimization.
	 */
	if (create && !direct) {
		lockmode = XFS_ILOCK_EXCL;
		xfs_ilock(ip, lockmode);
	} else {
		lockmode = xfs_ilock_map_shared(ip);
	}

	ASSERT(offset <= mp->m_maxioffset);
	if (offset + size > mp->m_maxioffset)
		size = mp->m_maxioffset - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);

	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
				&imap, &nimaps, XFS_BMAPI_ENTIRE);
	if (error)
		goto out_unlock;

	if (create &&
	    (!nimaps ||
	     (imap.br_startblock == HOLESTARTBLOCK ||
	      imap.br_startblock == DELAYSTARTBLOCK))) {
		if (direct || xfs_get_extsz_hint(ip)) {
			/*
			 * Drop the ilock in preparation for starting the block
			 * allocation transaction.  It will be retaken
			 * exclusively inside xfs_iomap_write_direct for the
			 * actual allocation.
			 */
			xfs_iunlock(ip, lockmode);
			error = xfs_iomap_write_direct(ip, offset, size,
						       &imap, nimaps);
			if (error)
				return -error;
			new = 1;
		} else {
			/*
			 * Delalloc reservations do not require a transaction,
			 * we can go on without dropping the lock here. If we
			 * are allocating a new delalloc block, make sure that
			 * we set the new flag so that we mark the buffer new so
			 * that we know that it is newly allocated if the write
			 * fails.
			 */
			if (nimaps && imap.br_startblock == HOLESTARTBLOCK)
				new = 1;
			error = xfs_iomap_write_delay(ip, offset, size, &imap);
			if (error)
				goto out_unlock;

			xfs_iunlock(ip, lockmode);
		}

		trace_xfs_get_blocks_alloc(ip, offset, size, 0, &imap);
	} else if (nimaps) {
		trace_xfs_get_blocks_found(ip, offset, size, 0, &imap);
		xfs_iunlock(ip, lockmode);
	} else {
		trace_xfs_get_blocks_notfound(ip, offset, size);
		goto out_unlock;
	}

	if (imap.br_startblock != HOLESTARTBLOCK &&
	    imap.br_startblock != DELAYSTARTBLOCK) {
		/*
		 * For unwritten extents do not report a disk address on
		 * the read case (treat as if we're reading into a hole).
		 */
		if (create || !ISUNWRITTEN(&imap))
			xfs_map_buffer(inode, bh_result, &imap, offset);
		if (create && ISUNWRITTEN(&imap)) {
			if (direct)
				bh_result->b_private = inode;
			set_buffer_unwritten(bh_result);
		}
	}

	/*
	 * If this is a realtime file, data may be on a different device
	 * to that pointed to from the buffer_head b_bdev currently.
	 */
	bh_result->b_bdev = xfs_find_bdev_for_inode(inode);

	/*
	 * If we previously allocated a block out beyond eof and we are now
	 * coming back to use it then we will need to flag it as new even if it
	 * has a disk address.
	 *
	 * With sub-block writes into unwritten extents we also need to mark
	 * the buffer as new so that the unwritten parts of the buffer gets
	 * correctly zeroed.
	 */
	if (create &&
	    ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
	     (offset >= i_size_read(inode)) ||
	     (new || ISUNWRITTEN(&imap))))
		set_buffer_new(bh_result);

	if (imap.br_startblock == DELAYSTARTBLOCK) {
		BUG_ON(direct);
		if (create) {
			set_buffer_uptodate(bh_result);
			set_buffer_mapped(bh_result);
			set_buffer_delay(bh_result);
		}
	}

	/*
	 * If this is O_DIRECT or the mpage code calling tell them how large
	 * the mapping is, so that we can avoid repeated get_blocks calls.
	 */
	if (direct || size > (1 << inode->i_blkbits)) {
		xfs_off_t		mapping_size;

		mapping_size = imap.br_startoff + imap.br_blockcount - iblock;
		mapping_size <<= inode->i_blkbits;

		ASSERT(mapping_size > 0);
		if (mapping_size > size)
			mapping_size = size;
		if (mapping_size > LONG_MAX)
			mapping_size = LONG_MAX;

		bh_result->b_size = mapping_size;
	}

	return 0;

out_unlock:
	xfs_iunlock(ip, lockmode);
	return -error;
}

STATIC int
xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock, bh_result, create, 0);
}

STATIC int
xfs_get_blocks_direct(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock, bh_result, create, 1);
}

/*
 * Complete a direct I/O write request.
 *
 * If the private argument is non-NULL __xfs_get_blocks signals us that we
 * need to issue a transaction to convert the range from unwritten to written
 * extents.  In case this is regular synchronous I/O we just call xfs_end_io
 * to do this and we are done.  But in case this was a successful AIO
 * request this handler is called from interrupt context, from which we
 * can't start transactions.  In that case offload the I/O completion to
 * the workqueues we also use for buffered I/O completion.
 */
STATIC void
xfs_end_io_direct_write(
	struct kiocb		*iocb,
	loff_t			offset,
	ssize_t			size,
	void			*private,
	int			ret,
	bool			is_async)
{
	struct xfs_ioend	*ioend = iocb->private;

	/*
	 * While the generic direct I/O code updates the inode size, it does
	 * so only after the end_io handler is called, which means our
	 * end_io handler thinks the on-disk size is outside the in-core
	 * size.  To prevent this just update it a little bit earlier here.
	 */
	if (offset + size > i_size_read(ioend->io_inode))
		i_size_write(ioend->io_inode, offset + size);

	/*
	 * blockdev_direct_IO can return an error even after the I/O
	 * completion handler was called.  Thus we need to protect
	 * against double-freeing.
	 */
	iocb->private = NULL;

	ioend->io_offset = offset;
	ioend->io_size = size;
	ioend->io_iocb = iocb;
	ioend->io_result = ret;
	if (private && size > 0)
		ioend->io_type = IO_UNWRITTEN;

	if (is_async) {
		ioend->io_isasync = 1;
		xfs_finish_ioend(ioend);
	} else {
		xfs_finish_ioend_sync(ioend);
	}
}

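/*
 * Direct I/O entry point.  For writes an ioend is allocated up front,
 * and if the write may extend the on-disk inode size a setfilesize
 * transaction is reserved as well, so that I/O completion only has to
 * commit it.
 */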
STATIC ssize_t
xfs_vm_direct_IO(
	int			rw,
	struct kiocb		*iocb,
	const struct iovec	*iov,
	loff_t			offset,
	unsigned long		nr_segs)
{
	struct inode		*inode = iocb->ki_filp->f_mapping->host;
	struct block_device	*bdev = xfs_find_bdev_for_inode(inode);
	struct xfs_ioend	*ioend = NULL;
	ssize_t			ret;

	if (rw & WRITE) {
		size_t size = iov_length(iov, nr_segs);

		/*
		 * We need to preallocate a transaction for a size update
		 * here.  In the case that this write both updates the size
		 * and converts at least one unwritten extent we will cancel
		 * the still clean transaction after the I/O has finished.
		 */
		iocb->private = ioend = xfs_alloc_ioend(inode, IO_DIRECT);
		if (offset + size > XFS_I(inode)->i_d.di_size) {
			ret = xfs_setfilesize_trans_alloc(ioend);
			if (ret)
				goto out_destroy_ioend;
			ioend->io_isdirect = 1;
		}

		ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
					   offset, nr_segs,
					   xfs_get_blocks_direct,
					   xfs_end_io_direct_write, NULL, 0);
		if (ret != -EIOCBQUEUED && iocb->private)
			goto out_trans_cancel;
	} else {
		ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
					   offset, nr_segs,
					   xfs_get_blocks_direct,
					   NULL, NULL, 0);
	}

	return ret;

out_trans_cancel:
	if (ioend->io_append_trans) {
		current_set_flags_nested(&ioend->io_append_trans->t_pflags,
					 PF_FSTRANS);
		xfs_trans_cancel(ioend->io_append_trans, 0);
	}
out_destroy_ioend:
	xfs_destroy_ioend(ioend);
	return ret;
}

/*
 * Punch out the delalloc blocks we have already allocated.
 *
 * Don't bother with xfs_setattr given that nothing can have made it to disk
 * yet as the page is still locked at this point.
 */
STATIC void
xfs_vm_kill_delalloc_range(
	struct inode		*inode,
	loff_t			start,
	loff_t			end)
{
	struct xfs_inode	*ip = XFS_I(inode);
	xfs_fileoff_t		start_fsb;
	xfs_fileoff_t		end_fsb;
	int			error;

	start_fsb = XFS_B_TO_FSB(ip->i_mount, start);
	end_fsb = XFS_B_TO_FSB(ip->i_mount, end);
	if (end_fsb <= start_fsb)
		return;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
						end_fsb - start_fsb);
	if (error) {
		/* something screwed, just bail */
		if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
			xfs_alert(ip->i_mount,
		"xfs_vm_write_failed: unable to clean up ino %lld",
					ip->i_ino);
		}
	}
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
}

STATIC void
xfs_vm_write_failed(
	struct inode		*inode,
	struct page		*page,
	loff_t			pos,
	unsigned		len)
{
	loff_t			block_offset = pos & PAGE_MASK;
	loff_t			block_start = 0;
	loff_t			block_end;
	loff_t			from = pos & (PAGE_CACHE_SIZE - 1);
	loff_t			to = from + len;
	struct buffer_head	*bh, *head;

	ASSERT(block_offset + from == pos);

	head = page_buffers(page);
	for (bh = head; bh != head || !block_start;
	     bh = bh->b_this_page, block_start = block_end,
				   block_offset += bh->b_size) {
		block_end = block_start + bh->b_size;

		/* skip buffers before the write */
		if (block_end <= from)
			continue;

		/* if the buffer is after the write, we're done */
		if (block_start >= to)
			break;

		if (!buffer_delay(bh))
			continue;

		if (!buffer_new(bh) && block_offset < i_size_read(inode))
			continue;

		xfs_vm_kill_delalloc_range(inode, block_offset,
					   block_offset + bh->b_size);
	}
}

/*
 * This used to call block_write_begin(), but it unlocks and releases the page
 * on error, and we need that page to be able to punch stale delalloc blocks
 * out on failure.  Hence we copy-n-waste it here and call xfs_vm_write_failed()
 * at the appropriate point.
 */
STATIC int
xfs_vm_write_begin(
	struct file		*file,
	struct address_space	*mapping,
	loff_t			pos,
	unsigned		len,
	unsigned		flags,
	struct page		**pagep,
	void			**fsdata)
{
	pgoff_t			index = pos >> PAGE_CACHE_SHIFT;
	struct page		*page;
	int			status;

	ASSERT(len <= PAGE_CACHE_SIZE);

	page = grab_cache_page_write_begin(mapping, index,
					   flags | AOP_FLAG_NOFS);
	if (!page)
		return -ENOMEM;

	status = __block_write_begin(page, pos, len, xfs_get_blocks);
	if (unlikely(status)) {
		struct inode	*inode = mapping->host;

		xfs_vm_write_failed(inode, page, pos, len);
		unlock_page(page);

		if (pos + len > i_size_read(inode))
			truncate_pagecache(inode, pos + len, i_size_read(inode));

		page_cache_release(page);
		page = NULL;
	}

	*pagep = page;
	return status;
}

/*
 * On failure, we only need to kill delalloc blocks beyond EOF because they
 * will never be written.  For blocks within EOF, generic_write_end() zeros
 * them so they are safe to leave alone and be written with all the other
 * valid data.
 */
STATIC int
xfs_vm_write_end(
	struct file		*file,
	struct address_space	*mapping,
	loff_t			pos,
	unsigned		len,
	unsigned		copied,
	struct page		*page,
	void			*fsdata)
{
	int			ret;

	ASSERT(len <= PAGE_CACHE_SIZE);

	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
	if (unlikely(ret < len)) {
		struct inode	*inode = mapping->host;
		size_t		isize = i_size_read(inode);
		loff_t		to = pos + len;

		if (to > isize) {
			truncate_pagecache(inode, to, isize);
			xfs_vm_kill_delalloc_range(inode, isize, to);
		}
	}
	return ret;
}

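/*
 * bmap callers expect a stable block mapping, so flush dirty pagecache
 * under the shared iolock before asking the generic code to translate
 * the block number.
 */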
STATIC sector_t
xfs_vm_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct inode		*inode = (struct inode *)mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);

	trace_xfs_vm_bmap(XFS_I(inode));
	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	xfs_flush_pages(ip, (xfs_off_t)0, -1, 0, FI_REMAPF);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return generic_block_bmap(mapping, block, xfs_get_blocks);
}

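/*
 * The read paths simply hand off to the generic mpage helpers, with
 * xfs_get_blocks providing the block mapping.
 */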
STATIC int
xfs_vm_readpage(
	struct file		*unused,
	struct page		*page)
{
	return mpage_readpage(page, xfs_get_blocks);
}

STATIC int
xfs_vm_readpages(
	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	unsigned		nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
}

const struct address_space_operations xfs_address_space_operations = {
	.readpage		= xfs_vm_readpage,
	.readpages		= xfs_vm_readpages,
	.writepage		= xfs_vm_writepage,
	.writepages		= xfs_vm_writepages,
	.releasepage		= xfs_vm_releasepage,
	.invalidatepage		= xfs_vm_invalidatepage,
	.write_begin		= xfs_vm_write_begin,
	.write_end		= xfs_vm_write_end,
	.bmap			= xfs_vm_bmap,
	.direct_IO		= xfs_vm_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};