/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs_trans.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_error.h"
#include "xfs_iomap.h"
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
STATIC void xfs_count_page_state(struct page *, int *, int *, int *);
#if defined(XFS_RW_TRACE)
void
xfs_page_trace(
	int		tag,
	struct inode	*inode,
	struct page	*page,
	int		mask)
{
	xfs_inode_t	*ip;
	bhv_desc_t	*bdp;
	vnode_t		*vp = LINVFS_GET_VP(inode);
	loff_t		isize = i_size_read(inode);
	loff_t		offset = page_offset(page);
	int		delalloc = -1, unmapped = -1, unwritten = -1;

	if (page_has_buffers(page))
		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);

	bdp = vn_bhv_lookup(VN_BHV_HEAD(vp), &xfs_vnodeops);
	ip = XFS_BHVTOI(bdp);
	if (!ip->i_rwtrace)
		return;

	ktrace_enter(ip->i_rwtrace,
		(void *)((unsigned long)tag),
		(void *)ip,
		(void *)inode,
		(void *)page,
		(void *)((unsigned long)mask),
		(void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
		(void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
		(void *)((unsigned long)((isize >> 32) & 0xffffffff)),
		(void *)((unsigned long)(isize & 0xffffffff)),
		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
		(void *)((unsigned long)(offset & 0xffffffff)),
		(void *)((unsigned long)delalloc),
		(void *)((unsigned long)unmapped),
		(void *)((unsigned long)unwritten),
		(void *)NULL,
		(void *)NULL);
}
#else
#define xfs_page_trace(tag, inode, page, mask)
#endif
/*
 * Schedule IO completion handling on an xfsdatad if this was
 * the final hold on this ioend.
 */
STATIC void
xfs_finish_ioend(
	xfs_ioend_t		*ioend)
{
	if (atomic_dec_and_test(&ioend->io_remaining))
		queue_work(xfsdatad_workqueue, &ioend->io_work);
}
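
/*
 * Illustrative sketch (not part of the original source): the io_remaining
 * pattern above, modelled in userspace with C11 atomics.  The demo_* names
 * are hypothetical.  The ioend starts with one reference held by the
 * submitter; each in-flight bio holds another, and whichever path drops the
 * count to zero schedules the completion work.
 */
#if 0	/* standalone model, not built as part of this file */
#include <stdatomic.h>
#include <stdio.h>

struct demo_ioend {
	atomic_int	io_remaining;
};

static void demo_complete(struct demo_ioend *ioend)
{
	/* Stands in for queue_work(xfsdatad_workqueue, &ioend->io_work). */
	printf("completion work queued\n");
}

static void demo_finish(struct demo_ioend *ioend)
{
	/* Like atomic_dec_and_test(): true only for the final reference. */
	if (atomic_fetch_sub(&ioend->io_remaining, 1) == 1)
		demo_complete(ioend);
}

int main(void)
{
	struct demo_ioend ioend;

	atomic_init(&ioend.io_remaining, 1);	/* submitter's reference */
	atomic_fetch_add(&ioend.io_remaining, 1);	/* one in-flight bio */
	demo_finish(&ioend);	/* bio completes: 2 -> 1, no work queued */
	demo_finish(&ioend);	/* submitter drops: 1 -> 0, work runs */
	return 0;
}
#endif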
/*
 * We're now finished for good with this ioend structure.
 * Update the page state via the associated buffer_heads,
 * release holds on the inode and bio, and finally free
 * up memory.  Do not use the ioend after this.
 */
STATIC void
xfs_destroy_ioend(
	xfs_ioend_t		*ioend)
{
	struct buffer_head	*bh, *next;

	for (bh = ioend->io_buffer_head; bh; bh = next) {
		next = bh->b_private;
		bh->b_end_io(bh, ioend->io_uptodate);
	}

	vn_iowake(ioend->io_vnode);
	mempool_free(ioend, xfs_ioend_pool);
}
/*
 * Buffered IO write completion for delayed allocate extents.
 * TODO: Update ondisk isize now that we know the file data
 * has been flushed (i.e. the notorious "NULL file" problem).
 */
STATIC void
xfs_end_bio_delalloc(
	void			*data)
{
	xfs_ioend_t		*ioend = data;

	xfs_destroy_ioend(ioend);
}

/*
 * Buffered IO write completion for regular, written extents.
 */
STATIC void
xfs_end_bio_written(
	void			*data)
{
	xfs_ioend_t		*ioend = data;

	xfs_destroy_ioend(ioend);
}
/*
 * IO write completion for unwritten extents.
 *
 * Issue transactions to convert a buffer range from unwritten
 * to written extents.
 */
STATIC void
xfs_end_bio_unwritten(
	void			*data)
{
	xfs_ioend_t		*ioend = data;
	vnode_t			*vp = ioend->io_vnode;
	xfs_off_t		offset = ioend->io_offset;
	size_t			size = ioend->io_size;
	int			error;

	if (ioend->io_uptodate)
		VOP_BMAP(vp, offset, size, BMAPI_UNWRITTEN, NULL, NULL, error);
	xfs_destroy_ioend(ioend);
}
/*
 * Allocate and initialise an IO completion structure.
 * We need to track unwritten extent write completion here initially.
 * We'll need to extend this for updating the ondisk inode size later
 * (vs. incore size).
 */
STATIC xfs_ioend_t *
xfs_alloc_ioend(
	struct inode		*inode,
	unsigned int		type)
{
	xfs_ioend_t		*ioend;

	ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);

	/*
	 * Set the count to 1 initially, which will prevent the I/O
	 * completion callback from running before we have started
	 * all of the I/O; otherwise the completion routine could
	 * fire too early.
	 */
	atomic_set(&ioend->io_remaining, 1);
	ioend->io_uptodate = 1; /* cleared if any I/O fails */
	ioend->io_list = NULL;
	ioend->io_type = type;
	ioend->io_vnode = LINVFS_GET_VP(inode);
	ioend->io_buffer_head = NULL;
	ioend->io_buffer_tail = NULL;
	atomic_inc(&ioend->io_vnode->v_iocount);
	ioend->io_offset = 0;
	ioend->io_size = 0;

	if (type == IOMAP_UNWRITTEN)
		INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten, ioend);
	else if (type == IOMAP_DELAY)
		INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc, ioend);
	else
		INIT_WORK(&ioend->io_work, xfs_end_bio_written, ioend);

	return ioend;
}
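
/*
 * Illustrative sketch (not part of the original source): the INIT_WORK()
 * calls above bind one of three completion handlers to the ioend at
 * allocation time, selected by extent type.  Modelled here with a
 * function-pointer table; all demo_* names are hypothetical.
 */
#if 0	/* standalone model, not built as part of this file */
#include <stdio.h>

enum demo_type { DEMO_WRITTEN, DEMO_UNWRITTEN, DEMO_DELAY };

static void demo_end_written(void *data)
{
	printf("written: just free the ioend\n");
}

static void demo_end_unwritten(void *data)
{
	printf("unwritten: convert extents to written, then free\n");
}

static void demo_end_delalloc(void *data)
{
	printf("delalloc: (TODO: update on-disk size), then free\n");
}

static void (*const demo_handler[])(void *) = {
	[DEMO_WRITTEN]		= demo_end_written,
	[DEMO_UNWRITTEN]	= demo_end_unwritten,
	[DEMO_DELAY]		= demo_end_delalloc,
};

int main(void)
{
	/* The work item later invokes whichever handler was bound here. */
	demo_handler[DEMO_UNWRITTEN](0);
	return 0;
}
#endif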
STATIC int
xfs_map_blocks(
	struct inode		*inode,
	loff_t			offset,
	ssize_t			count,
	xfs_iomap_t		*mapp,
	int			flags)
{
	vnode_t			*vp = LINVFS_GET_VP(inode);
	int			error, nmaps = 1;

	VOP_BMAP(vp, offset, count, flags, mapp, &nmaps, error);
	if (!error && (flags & (BMAPI_WRITE|BMAPI_ALLOCATE)))
		VMODIFY(vp);
	return -error;
}
STATIC inline int
xfs_iomap_valid(
	xfs_iomap_t		*iomapp,
	loff_t			offset)
{
	return offset >= iomapp->iomap_offset &&
		offset < iomapp->iomap_offset + iomapp->iomap_bsize;
}
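
/*
 * Illustrative sketch (not part of the original source): xfs_iomap_valid()
 * above is a half-open interval test, offset in
 * [iomap_offset, iomap_offset + iomap_bsize).  Checked in userspace with a
 * hypothetical demo helper.
 */
#if 0	/* standalone model, not built as part of this file */
#include <assert.h>

static int demo_in_mapping(long long start, long long bsize, long long off)
{
	return off >= start && off < start + bsize;
}

int main(void)
{
	/* A mapping covering bytes [4096, 8192). */
	assert(demo_in_mapping(4096, 4096, 4096));	/* first byte: valid */
	assert(demo_in_mapping(4096, 4096, 8191));	/* last byte: valid */
	assert(!demo_in_mapping(4096, 4096, 8192));	/* one past end: not */
	return 0;
}
#endif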
/*
 * BIO completion handler for buffered IO.
 */
STATIC int
xfs_end_bio(
	struct bio		*bio,
	unsigned int		bytes_done,
	int			error)
{
	xfs_ioend_t		*ioend = bio->bi_private;

	if (bio->bi_size)
		return 1;

	ASSERT(ioend);
	ASSERT(atomic_read(&bio->bi_cnt) >= 1);

	/* Toss bio and pass work off to an xfsdatad thread */
	if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
		ioend->io_uptodate = 0;
	bio->bi_private = NULL;
	bio->bi_end_io = NULL;
	bio_put(bio);

	xfs_finish_ioend(ioend);
	return 0;
}
STATIC void
xfs_submit_ioend_bio(
	xfs_ioend_t		*ioend,
	struct bio		*bio)
{
	atomic_inc(&ioend->io_remaining);

	bio->bi_private = ioend;
	bio->bi_end_io = xfs_end_bio;

	submit_bio(WRITE, bio);
	ASSERT(!bio_flagged(bio, BIO_EOPNOTSUPP));
	bio_put(bio);
}
STATIC struct bio *
xfs_alloc_ioend_bio(
	struct buffer_head	*bh)
{
	struct bio		*bio;
	int			nvecs = bio_get_nr_vecs(bh->b_bdev);

	do {
		bio = bio_alloc(GFP_NOIO, nvecs);
		nvecs >>= 1;
	} while (!bio);

	ASSERT(bio->bi_private == NULL);
	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	bio_get(bio);
	return bio;
}
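
/*
 * Illustrative sketch (not part of the original source): the bi_sector
 * assignment above converts a filesystem block number into 512-byte
 * sectors, blocknr * (blocksize / 512).  Checked in userspace.
 */
#if 0	/* standalone model, not built as part of this file */
#include <assert.h>

int main(void)
{
	unsigned long long blocknr = 100;
	unsigned int b_size = 4096;	/* a 4 KiB filesystem block */

	/* bh->b_size >> 9 == sectors per block (4096 / 512 == 8). */
	assert((b_size >> 9) == 8);
	assert(blocknr * (b_size >> 9) == 800);	/* starting sector */
	return 0;
}
#endif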
STATIC void
xfs_start_buffer_writeback(
	struct buffer_head	*bh)
{
	ASSERT(buffer_mapped(bh));
	ASSERT(buffer_locked(bh));
	ASSERT(!buffer_delay(bh));
	ASSERT(!buffer_unwritten(bh));

	mark_buffer_async_write(bh);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
}
STATIC void
xfs_start_page_writeback(
	struct page		*page,
	struct writeback_control *wbc,
	int			clear_dirty,
	int			buffers)
{
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));
	set_page_writeback(page);
	if (clear_dirty)
		clear_page_dirty(page);
	unlock_page(page);
	if (!buffers) {
		end_page_writeback(page);
		wbc->pages_skipped++;	/* We didn't write this page */
	}
}
static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
	return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}
/*
 * Submit all of the bios for all of the ioends we have saved up,
 * covering the initial writepage page and also any probed pages.
 */
STATIC void
xfs_submit_ioend(
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*next;
	struct buffer_head	*bh;
	struct bio		*bio;
	sector_t		lastblock = 0;

	do {
		next = ioend->io_list;
		bio = NULL;

		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
			xfs_start_buffer_writeback(bh);

			if (!bio) {
 retry:
				bio = xfs_alloc_ioend_bio(bh);
			} else if (bh->b_blocknr != lastblock + 1) {
				xfs_submit_ioend_bio(ioend, bio);
				goto retry;
			}

			if (bio_add_buffer(bio, bh) != bh->b_size) {
				xfs_submit_ioend_bio(ioend, bio);
				goto retry;
			}

			lastblock = bh->b_blocknr;
		}
		if (bio)
			xfs_submit_ioend_bio(ioend, bio);
		xfs_finish_ioend(ioend);
	} while ((ioend = next) != NULL);
}
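
/*
 * Illustrative sketch (not part of the original source): the
 * lastblock + 1 test above merges physically contiguous buffers into a
 * single bio and starts a new bio at every gap.  Modelled over a plain
 * array of block numbers; demo names are hypothetical.
 */
#if 0	/* standalone model, not built as part of this file */
#include <stdio.h>

int main(void)
{
	/* Two contiguous runs, 7..9 and 42..43, become two "bios". */
	unsigned long long blocks[] = { 7, 8, 9, 42, 43 };
	unsigned long long lastblock = 0;
	int i, nbios = 0;

	for (i = 0; i < 5; i++) {
		if (nbios == 0 || blocks[i] != lastblock + 1)
			nbios++;		/* gap: start a new bio */
		lastblock = blocks[i];		/* else merge into current */
	}
	printf("%d bios submitted\n", nbios);	/* prints: 2 */
	return 0;
}
#endif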
/*
 * Cancel submission of all buffer_heads so far in this endio.
 * Toss the endio too.  Only ever called for the initial page
 * in a writepage request, so only ever one page.
 */
STATIC void
xfs_cancel_ioend(
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*next;
	struct buffer_head	*bh, *next_bh;

	do {
		next = ioend->io_list;
		bh = ioend->io_buffer_head;
		do {
			next_bh = bh->b_private;
			clear_buffer_async_write(bh);
			unlock_buffer(bh);
		} while ((bh = next_bh) != NULL);

		vn_iowake(ioend->io_vnode);
		mempool_free(ioend, xfs_ioend_pool);
	} while ((ioend = next) != NULL);
}
/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we
 * can, otherwise we finish off any current ioend and start another.
 * Return true if we've finished the given ioend.
 */
STATIC void
xfs_add_to_ioend(
	struct inode		*inode,
	struct buffer_head	*bh,
	xfs_off_t		offset,
	unsigned int		type,
	xfs_ioend_t		**result,
	int			need_ioend)
{
	xfs_ioend_t		*ioend = *result;

	if (!ioend || need_ioend || type != ioend->io_type) {
		xfs_ioend_t	*previous = *result;

		ioend = xfs_alloc_ioend(inode, type);
		ioend->io_offset = offset;
		ioend->io_buffer_head = bh;
		ioend->io_buffer_tail = bh;
		if (previous)
			previous->io_list = ioend;
		*result = ioend;
	} else {
		ioend->io_buffer_tail->b_private = bh;
		ioend->io_buffer_tail = bh;
	}

	bh->b_private = NULL;
	ioend->io_size += bh->b_size;
}
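
/*
 * Illustrative sketch (not part of the original source): the ioend chains
 * its buffers through b_private with a head/tail pair, so append is O(1).
 * Modelled here as a plain singly linked list; demo names are hypothetical,
 * and the first-buffer case is folded into the helper rather than into
 * allocation as in the code above.
 */
#if 0	/* standalone model, not built as part of this file */
#include <assert.h>
#include <stddef.h>

struct demo_buf { struct demo_buf *private; };
struct demo_ioend { struct demo_buf *head, *tail; };

static void demo_append(struct demo_ioend *io, struct demo_buf *bh)
{
	if (!io->head)
		io->head = bh;		/* first buffer in this ioend */
	else
		io->tail->private = bh;	/* chain through ->private */
	io->tail = bh;
	bh->private = NULL;		/* the tail terminates the list */
}

int main(void)
{
	struct demo_buf a = {0}, b = {0};
	struct demo_ioend io = {0};

	demo_append(&io, &a);
	demo_append(&io, &b);
	assert(io.head == &a && a.private == &b && io.tail == &b);
	return 0;
}
#endif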
STATIC void
xfs_map_at_offset(
	struct buffer_head	*bh,
	loff_t			offset,
	int			block_bits,
	xfs_iomap_t		*iomapp)
{
	xfs_daddr_t		bn;
	int			sector_shift;

	ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE));
	ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY));
	ASSERT(iomapp->iomap_bn != IOMAP_DADDR_NULL);

	sector_shift = block_bits - BBSHIFT;
	bn = (iomapp->iomap_bn >> sector_shift) +
	      ((offset - iomapp->iomap_offset) >> block_bits);

	ASSERT(bn || (iomapp->iomap_flags & IOMAP_REALTIME));
	ASSERT((bn << sector_shift) >= iomapp->iomap_bn);

	lock_buffer(bh);
	bh->b_blocknr = bn;
	bh->b_bdev = iomapp->iomap_target->bt_bdev;
	set_buffer_mapped(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
}
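
/*
 * Illustrative sketch (not part of the original source): the bn arithmetic
 * above.  iomap_bn is in 512-byte basic blocks (BBSHIFT == 9); shifting by
 * block_bits - BBSHIFT converts it to filesystem blocks, then the byte
 * delta into the mapping adds the block offset.  Checked in userspace with
 * made-up example values.
 */
#if 0	/* standalone model, not built as part of this file */
#include <assert.h>

int main(void)
{
	int block_bits = 12;			/* 4 KiB blocks */
	int sector_shift = block_bits - 9;	/* BBSHIFT == 9, so 3 */
	long long iomap_bn = 8000;		/* mapping start, 512B units */
	long long iomap_offset = 1 << 20;	/* mapping starts at 1 MiB */
	long long offset = (1 << 20) + 16384;	/* buffer 16 KiB further in */
	long long bn;

	bn = (iomap_bn >> sector_shift) +
	     ((offset - iomap_offset) >> block_bits);
	assert(bn == 1000 + 4);	/* base fs block + 4 blocks into mapping */
	return 0;
}
#endif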
/*
 * Look for a page at index which is unlocked and not mapped
 * yet - clustering for mmap write case.
 */
STATIC unsigned int
xfs_probe_unmapped_page(
	struct page		*page,
	unsigned int		pg_offset)
{
	int			ret = 0;

	if (PageWriteback(page))
		return 0;

	if (page->mapping && PageDirty(page)) {
		if (page_has_buffers(page)) {
			struct buffer_head	*bh, *head;

			bh = head = page_buffers(page);
			do {
				if (buffer_mapped(bh) || !buffer_uptodate(bh))
					break;
				ret += bh->b_size;
				if (ret >= pg_offset)
					break;
			} while ((bh = bh->b_this_page) != head);
		} else
			ret = PAGE_CACHE_SIZE;
	}

	return ret;
}
STATIC size_t
xfs_probe_unmapped_cluster(
	struct inode		*inode,
	struct page		*startpage,
	struct buffer_head	*bh,
	struct buffer_head	*head)
{
	struct pagevec		pvec;
	pgoff_t			tindex, tlast, tloff;
	size_t			total = 0;
	int			done = 0, i;

	/* First sum forwards in this page */
	do {
		if (buffer_mapped(bh))
			return total;
		total += bh->b_size;
	} while ((bh = bh->b_this_page) != head);

	/* if we reached the end of the page, sum forwards in following pages */
	tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
	tindex = startpage->index + 1;

	/* Prune this back to avoid pathological behavior */
	tloff = min(tlast, startpage->index + 64);

	pagevec_init(&pvec, 0);
	while (!done && tindex <= tloff) {
		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			size_t pg_offset, len = 0;

			if (tindex == tlast) {
				pg_offset =
				    i_size_read(inode) & (PAGE_CACHE_SIZE - 1);
				if (!pg_offset) {
					done = 1;
					break;
				}
			} else
				pg_offset = PAGE_CACHE_SIZE;

			if (page->index == tindex && !TestSetPageLocked(page)) {
				len = xfs_probe_unmapped_page(page, pg_offset);
				unlock_page(page);
			}

			if (!len) {
				done = 1;
				break;
			}

			total += len;
			tindex++;
		}

		pagevec_release(&pvec);
	}

	return total;
}
/*
 * Test if a given page is suitable for writing as part of an unwritten
 * or delayed allocate extent.
 */
STATIC int
xfs_is_delayed_page(
	struct page		*page,
	unsigned int		type)
{
	if (PageWriteback(page))
		return 0;

	if (page->mapping && page_has_buffers(page)) {
		struct buffer_head	*bh, *head;
		int			acceptable = 0;

		bh = head = page_buffers(page);
		do {
			if (buffer_unwritten(bh))
				acceptable = (type == IOMAP_UNWRITTEN);
			else if (buffer_delay(bh))
				acceptable = (type == IOMAP_DELAY);
			else
				break;
		} while ((bh = bh->b_this_page) != head);

		if (acceptable)
			return 1;
	}

	return 0;
}
/*
 * Allocate & map buffers for page given the extent map.  Write it out.
 * Except for the original page of a writepage, this is called on
 * delalloc/unwritten pages only; for the original page it is possible
 * that the page has no mapping at all.
 */
STATIC int
xfs_convert_page(
	struct inode		*inode,
	struct page		*page,
	loff_t			tindex,
	xfs_iomap_t		*mp,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc,
	int			startio,
	int			all_bh)
{
	struct buffer_head	*bh, *head;
	xfs_off_t		end_offset;
	unsigned long		p_offset;
	unsigned int		type;
	int			bbits = inode->i_blkbits;
	int			len, page_dirty;
	int			count = 0, done = 0, uptodate = 1;
	xfs_off_t		offset = page_offset(page);

	if (page->index != tindex)
		goto fail;
	if (TestSetPageLocked(page))
		goto fail;
	if (PageWriteback(page))
		goto fail_unlock_page;
	if (page->mapping != inode->i_mapping)
		goto fail_unlock_page;
	if (!xfs_is_delayed_page(page, (*ioendp)->io_type))
		goto fail_unlock_page;

	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 *
	 * Derivation:
	 *
	 * End offset is the highest offset that this page should represent.
	 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
	 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
	 * hence give us the correct page_dirty count. On any other page,
	 * it will be zero and in that case we need page_dirty to be the
	 * count of buffers on the page.
	 */
	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
			i_size_read(inode));

	len = 1 << inode->i_blkbits;
	p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
					PAGE_CACHE_SIZE);
	p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
	page_dirty = p_offset / len;
	bh = head = page_buffers(page);
	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh))) {
			done = 1;
			continue;
		}

		if (buffer_unwritten(bh) || buffer_delay(bh)) {
			if (buffer_unwritten(bh))
				type = IOMAP_UNWRITTEN;
			else
				type = IOMAP_DELAY;

			if (!xfs_iomap_valid(mp, offset)) {
				done = 1;
				continue;
			}

			ASSERT(!(mp->iomap_flags & IOMAP_HOLE));
			ASSERT(!(mp->iomap_flags & IOMAP_DELAY));

			xfs_map_at_offset(bh, offset, bbits, mp);
			if (startio) {
				xfs_add_to_ioend(inode, bh, offset,
						type, ioendp, done);
			} else {
				set_buffer_dirty(bh);
				unlock_buffer(bh);
				mark_buffer_dirty(bh);
			}
			page_dirty--;
			count++;
		} else {
			type = 0;
			if (buffer_mapped(bh) && all_bh && startio) {
				lock_buffer(bh);
				xfs_add_to_ioend(inode, bh, offset,
						type, ioendp, done);
				count++;
				page_dirty--;
			} else {
				done = 1;
			}
		}
	} while (offset += len, (bh = bh->b_this_page) != head);

	if (uptodate && bh == head)
		SetPageUptodate(page);

	if (startio)
		xfs_start_page_writeback(page, wbc, !page_dirty, count);

	return done;

 fail_unlock_page:
	unlock_page(page);
 fail:
	return 1;
}
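
/*
 * Illustrative sketch (not part of the original source): the page_dirty
 * arithmetic in xfs_convert_page() above, checked in userspace for both a
 * partial last page and an interior page (4 KiB pages, 512-byte buffers).
 * The demo names and example values are made up.
 */
#if 0	/* standalone model, not built as part of this file */
#include <assert.h>

#define DEMO_PAGE_SIZE		4096UL
#define demo_roundup(x, y)	((((x) + (y) - 1) / (y)) * (y))

static unsigned long demo_page_dirty(unsigned long long end_offset,
				     unsigned long len)
{
	unsigned long p_offset = end_offset & (DEMO_PAGE_SIZE - 1);

	/* 0 means the page is fully covered: count every buffer on it. */
	p_offset = p_offset ? demo_roundup(p_offset, len) : DEMO_PAGE_SIZE;
	return p_offset / len;
}

int main(void)
{
	/* EOF 1300 bytes into the last page: 3 buffers sit before EOF. */
	assert(demo_page_dirty(8192 + 1300, 512) == 3);
	/* An interior page ends on a page boundary: all 8 buffers count. */
	assert(demo_page_dirty(8192, 512) == 8);
	return 0;
}
#endif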
/*
 * Convert & write out a cluster of pages in the same extent as defined
 * by mp and following the start page.
 */
STATIC void
xfs_cluster_write(
	struct inode		*inode,
	pgoff_t			tindex,
	xfs_iomap_t		*iomapp,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc,
	int			startio,
	int			all_bh,
	pgoff_t			tlast)
{
	struct pagevec		pvec;
	int			done = 0, i;

	pagevec_init(&pvec, 0);
	while (!done && tindex <= tlast) {
		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			done = xfs_convert_page(inode, pvec.pages[i], tindex++,
					iomapp, ioendp, wbc, startio, all_bh);
			if (done)
				break;
		}

		pagevec_release(&pvec);
	}
}
/*
 * Calling this without startio set means we are being asked to make a dirty
 * page ready for freeing its buffers.  When called with startio set then
 * we are coming from writepage.
 *
 * When called with startio set it is important that we write the WHOLE
 * page if possible.
 * The bh->b_state flags cannot know if any of the blocks, or which block
 * for that matter, are dirty due to mmap writes, and therefore bh uptodate
 * is only valid if the page itself isn't completely uptodate.  Some layers
 * may clear the page dirty flag prior to calling writepage, under the
 * assumption the entire page will be written out; by not writing out the
 * whole page the page can be reused before all valid dirty data is
 * written out.  Note: in the case of a page that has been dirtied by
 * mapwrite but only partially set up by block_prepare_write, the
 * bh->b_state flags will not agree and only the ones set up by BPW/BCW
 * will have valid state; thus the whole page must be written out.
 */
STATIC int
xfs_page_state_convert(
	struct inode	*inode,
	struct page	*page,
	struct writeback_control *wbc,
	int		startio,
	int		unmapped) /* also implies page uptodate */
{
	struct buffer_head	*bh, *head;
	xfs_iomap_t		iomap;
	xfs_ioend_t		*ioend = NULL, *iohead = NULL;
	loff_t			offset;
	unsigned long		p_offset = 0;
	unsigned int		type;
	__uint64_t		end_offset;
	pgoff_t			end_index, last_index, tlast;
	ssize_t			size, len;
	int			flags, err, iomap_valid = 0, uptodate = 1;
	int			page_dirty, count = 0, trylock_flag = 0;
	int			all_bh = unmapped;
	/* wait for other IO threads? */
	if (startio && wbc->sync_mode != WB_SYNC_NONE)
		trylock_flag |= BMAPI_TRYLOCK;

	/* Is this page beyond the end of the file? */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_CACHE_SHIFT;
	last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
	if (page->index >= end_index) {
		if ((page->index >= end_index + 1) ||
		    !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
			if (startio)
				unlock_page(page);
			return 0;
		}
	}
	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 *
	 * Derivation:
	 *
	 * End offset is the highest offset that this page should represent.
	 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
	 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
	 * hence give us the correct page_dirty count. On any other page,
	 * it will be zero and in that case we need page_dirty to be the
	 * count of buffers on the page.
	 */
	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT, offset);
	len = 1 << inode->i_blkbits;
	p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
					PAGE_CACHE_SIZE);
	p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
	page_dirty = p_offset / len;
	bh = head = page_buffers(page);
	offset = page_offset(page);

	/* TODO: cleanup count and page_dirty */

	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio) {
			/*
			 * the iomap is actually still valid, but the ioend
			 * isn't.  shouldn't happen too often.
			 */
			iomap_valid = 0;
			continue;
		}

		iomap_valid = xfs_iomap_valid(&iomap, offset);

		/*
		 * First case, map an unwritten extent and prepare for
		 * extent state conversion transaction on completion.
		 *
		 * Second case, allocate space for a delalloc buffer.
		 * We can return EAGAIN here in the release page case.
		 *
		 * Third case, an unmapped buffer was found, and we are
		 * in a path where we need to write the whole page out.
		 */
		if (buffer_unwritten(bh) || buffer_delay(bh) ||
		    ((buffer_uptodate(bh) || PageUptodate(page)) &&
		     !buffer_mapped(bh) && (unmapped || startio))) {
			if (buffer_unwritten(bh)) {
				type = IOMAP_UNWRITTEN;
				flags = BMAPI_WRITE|BMAPI_IGNSTATE;
			} else if (buffer_delay(bh)) {
				type = IOMAP_DELAY;
				flags = BMAPI_ALLOCATE;
				if (!startio)
					flags |= trylock_flag;
			} else {
				type = 0;
				flags = BMAPI_WRITE|BMAPI_MMAP;
			}

			if (!iomap_valid) {
				if (type == 0)
					size = xfs_probe_unmapped_cluster(inode,
							page, bh, head);
				else
					size = len;

				err = xfs_map_blocks(inode, offset, size,
						&iomap, flags);
				if (err)
					goto error;
				iomap_valid = xfs_iomap_valid(&iomap, offset);
			}
			if (iomap_valid) {
				xfs_map_at_offset(bh, offset,
						inode->i_blkbits, &iomap);
				if (startio) {
					xfs_add_to_ioend(inode, bh, offset,
							type, &ioend,
							!iomap_valid);
				} else {
					set_buffer_dirty(bh);
					unlock_buffer(bh);
					mark_buffer_dirty(bh);
				}
				page_dirty--;
				count++;
			}
		} else if (buffer_uptodate(bh) && startio) {
			type = 0;
			if (!test_and_set_bit(BH_Lock, &bh->b_state)) {
				ASSERT(buffer_mapped(bh));
				xfs_add_to_ioend(inode, bh, offset, type,
						&ioend, !iomap_valid);
				page_dirty--;
				count++;
			} else {
				iomap_valid = 0;
			}
		} else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
			   (unmapped || startio)) {
			iomap_valid = 0;
		}

		if (!iohead)
			iohead = ioend;

	} while (offset += len, ((bh = bh->b_this_page) != head));

	if (uptodate && bh == head)
		SetPageUptodate(page);

	if (startio)
		xfs_start_page_writeback(page, wbc, 1, count);

	if (ioend && iomap_valid) {
		offset = (iomap.iomap_offset + iomap.iomap_bsize - 1) >>
					PAGE_CACHE_SHIFT;
		tlast = min_t(pgoff_t, offset, last_index);
		xfs_cluster_write(inode, page->index + 1, &iomap, &ioend,
					wbc, startio, all_bh, tlast);
	}

	if (iohead)
		xfs_submit_ioend(iohead);

	return page_dirty;

error:
	if (iohead)
		xfs_cancel_ioend(iohead);

	/*
	 * If it's delalloc and we have nowhere to put it,
	 * throw it away, unless the lower layers told
	 * us to try again.
	 */
	if (err != -EAGAIN) {
		if (!unmapped)
			block_invalidatepage(page, 0);
		ClearPageUptodate(page);
	}
	return err;
}
,
986 struct buffer_head
*bh_result
,
991 vnode_t
*vp
= LINVFS_GET_VP(inode
);
998 offset
= (xfs_off_t
)iblock
<< inode
->i_blkbits
;
1000 size
= (ssize_t
) min_t(xfs_off_t
, LONG_MAX
,
1001 (xfs_off_t
)blocks
<< inode
->i_blkbits
);
1003 size
= 1 << inode
->i_blkbits
;
1005 VOP_BMAP(vp
, offset
, size
,
1006 create
? flags
: BMAPI_READ
, &iomap
, &retpbbm
, error
);
1013 if (iomap
.iomap_bn
!= IOMAP_DADDR_NULL
) {
1017 /* For unwritten extents do not report a disk address on
1018 * the read case (treat as if we're reading into a hole).
1020 if (create
|| !(iomap
.iomap_flags
& IOMAP_UNWRITTEN
)) {
1021 delta
= offset
- iomap
.iomap_offset
;
1022 delta
>>= inode
->i_blkbits
;
1024 bn
= iomap
.iomap_bn
>> (inode
->i_blkbits
- BBSHIFT
);
1026 BUG_ON(!bn
&& !(iomap
.iomap_flags
& IOMAP_REALTIME
));
1027 bh_result
->b_blocknr
= bn
;
1028 set_buffer_mapped(bh_result
);
1030 if (create
&& (iomap
.iomap_flags
& IOMAP_UNWRITTEN
)) {
1032 bh_result
->b_private
= inode
;
1033 set_buffer_unwritten(bh_result
);
1034 set_buffer_delay(bh_result
);
1038 /* If this is a realtime file, data might be on a new device */
1039 bh_result
->b_bdev
= iomap
.iomap_target
->bt_bdev
;
1041 /* If we previously allocated a block out beyond eof and
1042 * we are now coming back to use it then we will need to
1043 * flag it as new even if it has a disk address.
1046 ((!buffer_mapped(bh_result
) && !buffer_uptodate(bh_result
)) ||
1047 (offset
>= i_size_read(inode
)) || (iomap
.iomap_flags
& IOMAP_NEW
)))
1048 set_buffer_new(bh_result
);
1050 if (iomap
.iomap_flags
& IOMAP_DELAY
) {
1053 set_buffer_uptodate(bh_result
);
1054 set_buffer_mapped(bh_result
);
1055 set_buffer_delay(bh_result
);
1060 ASSERT(iomap
.iomap_bsize
- iomap
.iomap_delta
> 0);
1061 offset
= min_t(xfs_off_t
,
1062 iomap
.iomap_bsize
- iomap
.iomap_delta
,
1063 (xfs_off_t
)blocks
<< inode
->i_blkbits
);
1064 bh_result
->b_size
= (u32
) min_t(xfs_off_t
, UINT_MAX
, offset
);
int
linvfs_get_block(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __linvfs_get_block(inode, iblock, 0, bh_result,
					create, 0, BMAPI_WRITE);
}

STATIC int
linvfs_get_blocks_direct(
	struct inode		*inode,
	sector_t		iblock,
	unsigned long		max_blocks,
	struct buffer_head	*bh_result,
	int			create)
{
	return __linvfs_get_block(inode, iblock, max_blocks, bh_result,
					create, 1, BMAPI_WRITE|BMAPI_DIRECT);
}
STATIC void
linvfs_end_io_direct(
	struct kiocb	*iocb,
	loff_t		offset,
	ssize_t		size,
	void		*private)
{
	xfs_ioend_t	*ioend = iocb->private;

	/*
	 * Non-NULL private data means we need to issue a transaction to
	 * convert a range from unwritten to written extents.  This needs
	 * to happen from process context but aio+dio I/O completion
	 * happens from irq context so we need to defer it to a workqueue.
	 * This is not necessary for synchronous direct I/O, but we do
	 * it anyway to keep the code uniform and simpler.
	 *
	 * The core direct I/O code might be changed to always call the
	 * completion handler in the future, in which case all this can
	 * go away.
	 */
	if (private && size > 0) {
		ioend->io_offset = offset;
		ioend->io_size = size;
		xfs_finish_ioend(ioend);
	} else {
		xfs_destroy_ioend(ioend);
	}

	/*
	 * blockdev_direct_IO can return an error even after the I/O
	 * completion handler was called.  Thus we need to protect
	 * against double-freeing.
	 */
	iocb->private = NULL;
}
STATIC ssize_t
linvfs_direct_IO(
	int			rw,
	struct kiocb		*iocb,
	const struct iovec	*iov,
	loff_t			offset,
	unsigned long		nr_segs)
{
	struct file	*file = iocb->ki_filp;
	struct inode	*inode = file->f_mapping->host;
	vnode_t		*vp = LINVFS_GET_VP(inode);
	xfs_iomap_t	iomap;
	int		maps = 1;
	int		error;
	ssize_t		ret;

	VOP_BMAP(vp, offset, 0, BMAPI_DEVICE, &iomap, &maps, error);
	if (error)
		return -error;

	iocb->private = xfs_alloc_ioend(inode, IOMAP_UNWRITTEN);

	ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
		iomap.iomap_target->bt_bdev,
		iov, offset, nr_segs,
		linvfs_get_blocks_direct,
		linvfs_end_io_direct);

	if (unlikely(ret <= 0 && iocb->private))
		xfs_destroy_ioend(iocb->private);
	return ret;
}
STATIC sector_t
linvfs_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct inode		*inode = (struct inode *)mapping->host;
	vnode_t			*vp = LINVFS_GET_VP(inode);
	int			error;

	vn_trace_entry(vp, "linvfs_bmap", (inst_t *)__return_address);

	VOP_RWLOCK(vp, VRWLOCK_READ);
	VOP_FLUSH_PAGES(vp, (xfs_off_t)0, -1, 0, FI_REMAPF, error);
	VOP_RWUNLOCK(vp, VRWLOCK_READ);
	return generic_block_bmap(mapping, block, linvfs_get_block);
}
STATIC int
linvfs_readpage(
	struct file		*unused,
	struct page		*page)
{
	return mpage_readpage(page, linvfs_get_block);
}

STATIC int
linvfs_readpages(
	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	unsigned		nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, linvfs_get_block);
}
STATIC void
xfs_count_page_state(
	struct page		*page,
	int			*delalloc,
	int			*unmapped,
	int			*unwritten)
{
	struct buffer_head	*bh, *head;

	*delalloc = *unmapped = *unwritten = 0;

	bh = head = page_buffers(page);
	do {
		if (buffer_uptodate(bh) && !buffer_mapped(bh))
			(*unmapped) = 1;
		else if (buffer_unwritten(bh) && !buffer_delay(bh))
			clear_buffer_unwritten(bh);
		else if (buffer_unwritten(bh))
			(*unwritten) = 1;
		else if (buffer_delay(bh))
			(*delalloc) = 1;
	} while ((bh = bh->b_this_page) != head);
}
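
/*
 * Illustrative sketch (not part of the original source): the buffer
 * classification above as a truth table over the state bits, checked in
 * userspace.  The demo_* names are hypothetical.
 */
#if 0	/* standalone model, not built as part of this file */
#include <assert.h>

struct demo_state { int uptodate, mapped, unwritten, delay; };

/* Returns 'u' (unmapped), 'w' (unwritten), 'd' (delalloc) or 0. */
static char demo_classify(struct demo_state s)
{
	if (s.uptodate && !s.mapped)
		return 'u';
	if (s.unwritten && !s.delay)
		return 0;	/* stale unwritten bit: would be cleared */
	if (s.unwritten)
		return 'w';
	if (s.delay)
		return 'd';
	return 0;
}

int main(void)
{
	assert(demo_classify((struct demo_state){1, 0, 0, 0}) == 'u');
	assert(demo_classify((struct demo_state){1, 1, 1, 1}) == 'w');
	assert(demo_classify((struct demo_state){1, 1, 0, 1}) == 'd');
	return 0;
}
#endif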
/*
 * writepage: Called from one of two places:
 *
 * 1. we are flushing a delalloc buffer head.
 *
 * 2. we are writing out a dirty page. Typically the page dirty
 *    state is cleared before we get here. In this case it is
 *    conceivable we have no buffer heads.
 *
 * For delalloc space on the page we need to allocate space and
 * flush it. For unmapped buffer heads on the page we should
 * allocate space if the page is uptodate. For any other dirty
 * buffer heads on the page we should flush them.
 *
 * If we detect that a transaction would be required to flush
 * the page, we have to check the process flags first: if we
 * are already in a transaction, or disk I/O during allocations
 * is off, we need to fail the writepage and redirty the page.
 */
STATIC int
linvfs_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	int			error;
	int			need_trans;
	int			delalloc, unmapped, unwritten;
	struct inode		*inode = page->mapping->host;

	xfs_page_trace(XFS_WRITEPAGE_ENTER, inode, page, 0);

	/*
	 * We need a transaction if:
	 *  1. There are delalloc buffers on the page
	 *  2. The page is uptodate and we have unmapped buffers
	 *  3. The page is uptodate and we have no buffers
	 *  4. There are unwritten buffers on the page
	 */
	if (!page_has_buffers(page)) {
		unmapped = 1;
		need_trans = 1;
	} else {
		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
		if (!PageUptodate(page))
			unmapped = 0;
		need_trans = delalloc + unmapped + unwritten;
	}
	/*
	 * If we need a transaction and the process flags say
	 * we are already in a transaction, or no IO is allowed,
	 * then mark the page dirty again and leave the page
	 * as is.
	 */
	if (PFLAGS_TEST_FSTRANS() && need_trans)
		goto out_fail;

	/*
	 * Delay hooking up buffer heads until we have
	 * made our go/no-go decision.
	 */
	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << inode->i_blkbits, 0);

	/*
	 * Convert delayed allocate, unwritten or unmapped space
	 * to real space and flush out to disk.
	 */
	error = xfs_page_state_convert(inode, page, wbc, 1, unmapped);
	if (error == -EAGAIN)
		goto out_fail;
	if (unlikely(error < 0))
		goto out_unlock;

	return 0;

out_fail:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
out_unlock:
	unlock_page(page);
	return error;
}
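
/*
 * Illustrative sketch (not part of the original source): the need_trans
 * decision in linvfs_writepage() above reduces to "any of delalloc,
 * unmapped or unwritten present" (the counters are 0/1 flags), with a
 * bufferless page treated as unmapped.  Checked in userspace; demo names
 * are hypothetical.
 */
#if 0	/* standalone model, not built as part of this file */
#include <assert.h>

static int demo_need_trans(int has_buffers, int uptodate,
			   int delalloc, int unmapped, int unwritten)
{
	if (!has_buffers)
		return 1;		/* case 3: no buffer heads at all */
	if (!uptodate)
		unmapped = 0;		/* unmapped only counts if uptodate */
	return delalloc + unmapped + unwritten;	/* cases 1, 2 and 4 */
}

int main(void)
{
	assert(demo_need_trans(0, 1, 0, 0, 0) == 1);	/* bufferless page */
	assert(demo_need_trans(1, 0, 0, 1, 0) == 0);	/* unmapped, !uptodate */
	assert(demo_need_trans(1, 1, 1, 0, 1) == 2);	/* delalloc+unwritten */
	return 0;
}
#endif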
STATIC int
linvfs_invalidate_page(
	struct page		*page,
	unsigned long		offset)
{
	xfs_page_trace(XFS_INVALIDPAGE_ENTER,
			page->mapping->host, page, offset);
	return block_invalidatepage(page, offset);
}
/*
 * Called to move a page into cleanable state - and from there
 * to be released. Possibly the page is already clean. We always
 * have buffer heads in this call.
 *
 * Returns 0 if the page is ok to release, 1 otherwise.
 *
 * Possible scenarios are:
 *
 * 1. We are being called to release a page which has been written
 *    to via regular I/O. buffer heads will be dirty and possibly
 *    delalloc. If no delalloc buffer heads in this case then we
 *    can just return zero.
 *
 * 2. We are called to release a page which has been written via
 *    mmap, all we need to do is ensure there is no delalloc
 *    state in the buffer heads, if not we can let the caller
 *    free them and we should come back later via writepage.
 */
STATIC int
linvfs_release_page(
	struct page		*page,
	gfp_t			gfp_mask)
{
	struct inode		*inode = page->mapping->host;
	int			dirty, delalloc, unmapped, unwritten;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 1,
	};

	xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, gfp_mask);

	xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
	if (!delalloc && !unwritten)
		goto free_buffers;

	if (!(gfp_mask & __GFP_FS))
		return 0;

	/* If we are already inside a transaction or the thread cannot
	 * do I/O, we cannot release this page.
	 */
	if (PFLAGS_TEST_FSTRANS())
		return 0;

	/*
	 * Convert delalloc space to real space, do not flush the
	 * data out to disk, that will be done by the caller.
	 * Never need to allocate space here - we will always
	 * come back to writepage in that case.
	 */
	dirty = xfs_page_state_convert(inode, page, &wbc, 0, 0);
	if (dirty == 0 && !unwritten)
		goto free_buffers;
	return 0;

free_buffers:
	return try_to_free_buffers(page);
}
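
/*
 * Illustrative sketch (not part of the original source): the gatekeeping
 * order in linvfs_release_page() above.  Pages with neither delalloc nor
 * unwritten state can free their buffers immediately; otherwise the page is
 * kept unless the allocation context allows FS recursion and we are not
 * already inside a transaction.  Demo names are hypothetical.
 */
#if 0	/* standalone model, not built as part of this file */
#include <assert.h>

/* Returns 1 if buffers may be freed now, 0 if the page must be kept. */
static int demo_can_release(int delalloc, int unwritten,
			    int gfp_allows_fs, int in_fs_transaction)
{
	if (!delalloc && !unwritten)
		return 1;	/* nothing to convert: free the buffers */
	if (!gfp_allows_fs || in_fs_transaction)
		return 0;	/* unsafe to do filesystem work here */
	return 1;		/* would fall through to state conversion */
}

int main(void)
{
	assert(demo_can_release(0, 0, 0, 1) == 1);	/* clean state */
	assert(demo_can_release(1, 0, 0, 0) == 0);	/* no __GFP_FS */
	assert(demo_can_release(1, 0, 1, 1) == 0);	/* in transaction */
	return 0;
}
#endif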
STATIC int
linvfs_prepare_write(
	struct file		*file,
	struct page		*page,
	unsigned int		from,
	unsigned int		to)
{
	return block_prepare_write(page, from, to, linvfs_get_block);
}
struct address_space_operations linvfs_aops = {
	.readpage		= linvfs_readpage,
	.readpages		= linvfs_readpages,
	.writepage		= linvfs_writepage,
	.sync_page		= block_sync_page,
	.releasepage		= linvfs_release_page,
	.invalidatepage		= linvfs_invalidate_page,
	.prepare_write		= linvfs_prepare_write,
	.commit_write		= generic_commit_write,
	.bmap			= linvfs_bmap,
	.direct_IO		= linvfs_direct_IO,
};