/*
 * Copyright (c) 2000-2004 Silicon Graphics, Inc.  All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Further, this software is distributed without any warranty that it is
 * free of the rightful claim of any third person regarding infringement
 * or the like.  Any license provided herein, whether implied or
 * otherwise, applies only to this software file.  Patent licenses, if
 * any, provided herein do not apply to combinations of this program with
 * other software, or any other product whatsoever.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
 * Mountain View, CA 94043, or:
 *
 * For further information regarding this notice, see:
 *
 * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
 */
#include "xfs_trans.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_attr_sf.h"
#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_error.h"
#include "xfs_iomap.h"
#include <linux/mpage.h>
#include <linux/writeback.h>
STATIC void xfs_count_page_state(struct page *, int *, int *, int *);
STATIC void xfs_convert_page(struct inode *, struct page *, xfs_iomap_t *,
		struct writeback_control *wbc, void *, int, int);
#if defined(XFS_RW_TRACE)
void
xfs_page_trace(
	int		tag,
	struct inode	*inode,
	struct page	*page,
	int		mask)
{
	xfs_inode_t	*ip;
	bhv_desc_t	*bdp;
	vnode_t		*vp = LINVFS_GET_VP(inode);
	loff_t		isize = i_size_read(inode);
	loff_t		offset = page->index << PAGE_CACHE_SHIFT;
	int		delalloc = -1, unmapped = -1, unwritten = -1;

	if (page_has_buffers(page))
		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);

	bdp = vn_bhv_lookup(VN_BHV_HEAD(vp), &xfs_vnodeops);
	ip = XFS_BHVTOI(bdp);
	if (!ip->i_rwtrace)
		return;

	ktrace_enter(ip->i_rwtrace,
		(void *)((unsigned long)tag),
		(void *)ip,
		(void *)inode,
		(void *)page,
		(void *)((unsigned long)mask),
		(void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
		(void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
		(void *)((unsigned long)((isize >> 32) & 0xffffffff)),
		(void *)((unsigned long)(isize & 0xffffffff)),
		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
		(void *)((unsigned long)(offset & 0xffffffff)),
		(void *)((unsigned long)delalloc),
		(void *)((unsigned long)unmapped),
		(void *)((unsigned long)unwritten),
		(void *)NULL,
		(void *)NULL);
}
#else
#define xfs_page_trace(tag, inode, page, mask)
#endif
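/*
 * Illustrative sketch (an assumption, not part of the original file):
 * the ktrace_enter() call above stores 64-bit quantities (di_size, isize,
 * offset) as two 32-bit halves, because each trace slot is only a void
 * pointer wide on 32-bit kernels. A minimal userspace demo of the
 * split/rejoin, compiled out with #if 0:
 */
#if 0
#include <assert.h>
#include <stdint.h>

int
main(void)
{
	uint64_t	isize = 0x123456789abcdef0ULL;

	/* split exactly as the trace code does */
	unsigned long	hi = (unsigned long)((isize >> 32) & 0xffffffff);
	unsigned long	lo = (unsigned long)(isize & 0xffffffff);

	/* a trace decoder would rejoin the halves like this */
	uint64_t	rejoined = ((uint64_t)hi << 32) | lo;

	assert(rejoined == isize);
	return 0;
}
#endif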
STATIC void
linvfs_unwritten_done(
	struct buffer_head	*bh,
	int			uptodate)
{
	xfs_buf_t	*pb = (xfs_buf_t *)bh->b_private;

	ASSERT(buffer_unwritten(bh));
	bh->b_end_io = NULL;
	clear_buffer_unwritten(bh);
	if (!uptodate)
		pagebuf_ioerror(pb, EIO);
	if (atomic_dec_and_test(&pb->pb_io_remaining) == 1) {
		pagebuf_iodone(pb, 1, 1);
	}
	end_buffer_async_write(bh, uptodate);
}
/*
 * Issue transactions to convert a buffer range from unwritten
 * to written extents (buffered IO).
 */
STATIC void
linvfs_unwritten_convert(
	xfs_buf_t	*bp)
{
	vnode_t		*vp = XFS_BUF_FSPRIVATE(bp, vnode_t *);
	int		error;

	BUG_ON(atomic_read(&bp->pb_hold) < 1);
	VOP_BMAP(vp, XFS_BUF_OFFSET(bp), XFS_BUF_SIZE(bp),
			BMAPI_UNWRITTEN, NULL, NULL, error);
	XFS_BUF_SET_FSPRIVATE(bp, NULL);
	XFS_BUF_CLR_IODONE_FUNC(bp);
	XFS_BUF_UNDATAIO(bp);
	iput(LINVFS_GET_IP(vp));
	pagebuf_iodone(bp, 0, 0);
}
/*
 * Issue transactions to convert a buffer range from unwritten
 * to written extents (direct IO).
 */
STATIC void
linvfs_unwritten_convert_direct(
	struct inode	*inode,
	loff_t		offset,
	ssize_t		size,
	void		*private)
{
	ASSERT(!private || inode == (struct inode *)private);

	/* private indicates an unwritten extent lay beneath this IO,
	 * see linvfs_get_block_core.
	 */
	if (private && size > 0) {
		vnode_t	*vp = LINVFS_GET_VP(inode);
		int	error;

		VOP_BMAP(vp, offset, size, BMAPI_UNWRITTEN, NULL, NULL, error);
	}
}
STATIC int
xfs_map_blocks(
	struct inode		*inode,
	loff_t			offset,
	ssize_t			count,
	xfs_iomap_t		*mapp,
	int			flags)
{
	vnode_t			*vp = LINVFS_GET_VP(inode);
	int			error, nmaps = 1;

	VOP_BMAP(vp, offset, count, flags, mapp, &nmaps, error);
	if (!error && (flags & (BMAPI_WRITE|BMAPI_ALLOCATE)))
		VMODIFY(vp);
	return -error;
}
/*
 * Finds the corresponding mapping in block @map array of the
 * given @offset within a @page.
 */
STATIC xfs_iomap_t *
xfs_offset_to_map(
	struct page		*page,
	xfs_iomap_t		*iomapp,
	unsigned long		offset)
{
	loff_t			full_offset;	/* offset from start of file */

	ASSERT(offset < PAGE_CACHE_SIZE);

	full_offset = page->index;		/* NB: using 64bit number */
	full_offset <<= PAGE_CACHE_SHIFT;	/* offset from file start */
	full_offset += offset;			/* offset from page start */

	if (full_offset < iomapp->iomap_offset)
		return NULL;
	if (iomapp->iomap_offset + (iomapp->iomap_bsize - 1) >= full_offset)
		return iomapp;
	return NULL;
}
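/*
 * Worked example (an assumption, illustrative only, not from the original
 * file): with 4096-byte pages (PAGE_CACHE_SHIFT == 12), a byte at offset
 * 512 within page index 3 lives at file offset 3*4096 + 512 = 12800. The
 * shift must happen in 64-bit arithmetic, as above, or large page indices
 * would overflow a 32-bit long. Compiled out with #if 0:
 */
#if 0
#include <assert.h>
#include <stdint.h>

#define DEMO_PAGE_SHIFT	12

int
main(void)
{
	uint64_t	full_offset;
	unsigned long	pg_index = 3, offset = 512;

	full_offset = pg_index;			/* promote to 64 bits first */
	full_offset <<= DEMO_PAGE_SHIFT;	/* byte offset of page in file */
	full_offset += offset;			/* add offset within the page */

	assert(full_offset == 12800);
	return 0;
}
#endif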
STATIC void
xfs_map_at_offset(
	struct page		*page,
	struct buffer_head	*bh,
	unsigned long		offset,
	int			block_bits,
	xfs_iomap_t		*iomapp)
{
	xfs_daddr_t		bn;
	loff_t			delta;
	int			sector_shift;

	ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE));
	ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY));
	ASSERT(iomapp->iomap_bn != IOMAP_DADDR_NULL);

	delta = page->index;
	delta <<= PAGE_CACHE_SHIFT;
	delta += offset;
	delta -= iomapp->iomap_offset;
	delta >>= block_bits;

	sector_shift = block_bits - BBSHIFT;
	bn = iomapp->iomap_bn >> sector_shift;
	bn += delta;
	BUG_ON(!bn && !(iomapp->iomap_flags & IOMAP_REALTIME));
	ASSERT((bn << sector_shift) >= iomapp->iomap_bn);

	lock_buffer(bh);
	bh->b_blocknr = bn;
	bh->b_bdev = iomapp->iomap_target->pbr_bdev;
	set_buffer_mapped(bh);
	clear_buffer_delay(bh);
}
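/*
 * Worked example (an assumption, illustrative only, not from the original
 * file): iomap_bn is in 512-byte basic blocks (BBSHIFT == 9). With
 * 4096-byte filesystem blocks (block_bits == 12), sector_shift is
 * 12 - 9 = 3, so basic block 80 maps to filesystem block 80 >> 3 == 10,
 * and shifting back never loses bits because extents are block aligned:
 */
#if 0
#include <assert.h>
#include <stdint.h>

#define DEMO_BBSHIFT	9	/* 512-byte basic blocks */

int
main(void)
{
	int		block_bits = 12;	/* 4k filesystem blocks */
	int		sector_shift = block_bits - DEMO_BBSHIFT;
	uint64_t	iomap_bn = 80;		/* extent start, basic blocks */
	uint64_t	bn = iomap_bn >> sector_shift;

	assert(sector_shift == 3);
	assert(bn == 10);
	assert((bn << sector_shift) >= iomap_bn);	/* the ASSERT above */
	return 0;
}
#endif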
/*
 * Look for a page at index which is unlocked and contains our
 * unwritten extent flagged buffers at its head.  Returns page
 * locked and with an extra reference count, and length of the
 * unwritten extent component on this page that we can write,
 * in units of filesystem blocks.
 */
STATIC struct page *
xfs_probe_unwritten_page(
	struct address_space	*mapping,
	pgoff_t			index,
	xfs_iomap_t		*iomapp,
	xfs_buf_t		*pb,
	unsigned long		max_offset,
	unsigned long		*fsbs,
	unsigned int		bbits)
{
	struct page		*page;

	page = find_trylock_page(mapping, index);
	if (!page)
		return NULL;
	if (PageWriteback(page))
		goto out;

	if (page->mapping && page_has_buffers(page)) {
		struct buffer_head	*bh, *head;
		unsigned long		p_offset = 0;

		*fsbs = 0;
		bh = head = page_buffers(page);
		do {
			if (!buffer_unwritten(bh) || !buffer_uptodate(bh))
				break;
			if (!xfs_offset_to_map(page, iomapp, p_offset))
				break;
			if (p_offset >= max_offset)
				break;
			xfs_map_at_offset(page, bh, p_offset, bbits, iomapp);
			set_buffer_unwritten_io(bh);
			bh->b_private = pb;
			p_offset += bh->b_size;
			(*fsbs)++;
		} while ((bh = bh->b_this_page) != head);

		if (p_offset)
			return page;
	}

out:
	unlock_page(page);
	return NULL;
}
/*
 * Look for a page at index which is unlocked and not mapped
 * yet - clustering for mmap write case.
 */
STATIC unsigned int
xfs_probe_unmapped_page(
	struct address_space	*mapping,
	pgoff_t			index,
	unsigned int		pg_offset)
{
	struct page		*page;
	int			ret = 0;

	page = find_trylock_page(mapping, index);
	if (!page)
		return 0;
	if (PageWriteback(page))
		goto out;

	if (page->mapping && PageDirty(page)) {
		if (page_has_buffers(page)) {
			struct buffer_head	*bh, *head;

			bh = head = page_buffers(page);
			do {
				if (buffer_mapped(bh) || !buffer_uptodate(bh))
					break;
				ret += bh->b_size;
				if (ret >= pg_offset)
					break;
			} while ((bh = bh->b_this_page) != head);
		} else
			ret = PAGE_CACHE_SIZE;
	}

out:
	unlock_page(page);
	return ret;
}
STATIC unsigned int
xfs_probe_unmapped_cluster(
	struct inode		*inode,
	struct page		*startpage,
	struct buffer_head	*bh,
	struct buffer_head	*head)
{
	pgoff_t			tindex, tlast, tloff;
	unsigned int		pg_offset, len, total = 0;
	struct address_space	*mapping = inode->i_mapping;

	/* First sum forwards in this page */
	do {
		if (buffer_mapped(bh))
			break;
		total += bh->b_size;
	} while ((bh = bh->b_this_page) != head);

	/* If we reached the end of the page, sum forwards in
	 * following pages.
	 */
	if (bh == head) {
		tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
		/* Prune this back to avoid pathological behavior */
		tloff = min(tlast, startpage->index + 64);
		for (tindex = startpage->index + 1; tindex < tloff; tindex++) {
			len = xfs_probe_unmapped_page(mapping, tindex,
							PAGE_CACHE_SIZE);
			if (!len)
				break;
			total += len;
		}
		if (tindex == tlast &&
		    (pg_offset = i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
			total += xfs_probe_unmapped_page(mapping,
							tindex, pg_offset);
		}
	}
	return total;
}
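/*
 * Worked example (an assumption, illustrative only, not from the original
 * file): the probe loop above stops at the EOF page and handles the tail
 * separately, because only "isize & (PAGE_SIZE - 1)" bytes of that page
 * are valid. For a 10000-byte file with 4096-byte pages, pages 0 and 1
 * are full and page 2 holds 1808 valid bytes:
 */
#if 0
#include <assert.h>
#include <stdint.h>

#define DEMO_PAGE_SHIFT	12
#define DEMO_PAGE_SIZE	(1UL << DEMO_PAGE_SHIFT)

int
main(void)
{
	uint64_t	isize = 10000;
	uint64_t	tlast = isize >> DEMO_PAGE_SHIFT;	   /* EOF page index */
	unsigned long	pg_offset = isize & (DEMO_PAGE_SIZE - 1);  /* valid bytes */

	assert(tlast == 2);
	assert(pg_offset == 1808);
	return 0;
}
#endif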
/*
 * Probe for a given page (index) in the inode and test if it is delayed
 * and without unwritten buffers.  Returns page locked and with an extra
 * reference count.
 */
STATIC struct page *
xfs_probe_delalloc_page(
	struct inode		*inode,
	pgoff_t			index)
{
	struct page		*page;

	page = find_trylock_page(inode->i_mapping, index);
	if (!page)
		return NULL;
	if (PageWriteback(page))
		goto out;

	if (page->mapping && page_has_buffers(page)) {
		struct buffer_head	*bh, *head;
		int			acceptable = 0;

		bh = head = page_buffers(page);
		do {
			if (buffer_unwritten(bh)) {
				acceptable = 0;
				break;
			} else if (buffer_delay(bh)) {
				acceptable = 1;
			}
		} while ((bh = bh->b_this_page) != head);

		if (acceptable)
			return page;
	}

out:
	unlock_page(page);
	return NULL;
}
STATIC int
xfs_map_unwritten(
	struct inode		*inode,
	struct page		*start_page,
	struct buffer_head	*head,
	struct buffer_head	*curr,
	unsigned long		p_offset,
	int			block_bits,
	xfs_iomap_t		*iomapp,
	struct writeback_control *wbc,
	int			startio,
	int			all_bh)
{
	struct buffer_head	*bh = curr;
	xfs_iomap_t		*tmp;
	xfs_buf_t		*pb;
	loff_t			offset, size;
	unsigned long		nblocks = 0;

	offset = start_page->index;
	offset <<= PAGE_CACHE_SHIFT;
	offset += p_offset;

	/* get an "empty" pagebuf to manage IO completion
	 * Proper values will be set before returning */
	pb = pagebuf_lookup(iomapp->iomap_target, 0, 0, 0);
	if (!pb)
		return -EAGAIN;

	/* Take a reference to the inode to prevent it from
	 * being reclaimed while we have outstanding unwritten
	 * extent IO on it.
	 */
	if ((igrab(inode)) != inode) {
		pagebuf_free(pb);
		return -EAGAIN;
	}

	/* Set the count to 1 initially, this will stop an I/O
	 * completion callout which happens before we have started
	 * all the I/O from calling pagebuf_iodone too early.
	 */
	atomic_set(&pb->pb_io_remaining, 1);

	/* First map forwards in the page consecutive buffers
	 * covering this unwritten extent
	 */
	do {
		if (!buffer_unwritten(bh))
			break;
		tmp = xfs_offset_to_map(start_page, iomapp, p_offset);
		if (!tmp)
			break;
		xfs_map_at_offset(start_page, bh, p_offset, block_bits, iomapp);
		set_buffer_unwritten_io(bh);
		bh->b_private = pb;
		p_offset += bh->b_size;
		nblocks++;
	} while ((bh = bh->b_this_page) != head);

	atomic_add(nblocks, &pb->pb_io_remaining);

	/* If we reached the end of the page, map forwards in any
	 * following pages which are also covered by this extent.
	 */
	if (bh == head) {
		struct address_space	*mapping = inode->i_mapping;
		pgoff_t			tindex, tloff, tlast;
		unsigned long		bs;
		unsigned int		pg_offset, bbits = inode->i_blkbits;
		struct page		*page;

		tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
		tloff = (iomapp->iomap_offset + iomapp->iomap_bsize) >> PAGE_CACHE_SHIFT;
		tloff = min(tlast, tloff);
		for (tindex = start_page->index + 1; tindex < tloff; tindex++) {
			page = xfs_probe_unwritten_page(mapping,
						tindex, iomapp, pb,
						PAGE_CACHE_SIZE, &bs, bbits);
			if (!page)
				break;
			nblocks += bs;
			atomic_add(bs, &pb->pb_io_remaining);
			xfs_convert_page(inode, page, iomapp, wbc, pb,
							startio, all_bh);
			/* stop if converting the next page might add
			 * enough blocks that the corresponding byte
			 * count won't fit in our ulong page buf length */
			if (nblocks >= ((ULONG_MAX - PAGE_SIZE) >> block_bits))
				goto enough;
		}

		if (tindex == tlast &&
		    (pg_offset = (i_size_read(inode) & (PAGE_CACHE_SIZE - 1)))) {
			page = xfs_probe_unwritten_page(mapping,
							tindex, iomapp, pb,
							pg_offset, &bs, bbits);
			if (page) {
				nblocks += bs;
				atomic_add(bs, &pb->pb_io_remaining);
				xfs_convert_page(inode, page, iomapp, wbc, pb,
							startio, all_bh);
				if (nblocks >= ((ULONG_MAX - PAGE_SIZE) >> block_bits))
					goto enough;
			}
		}
	}

enough:
	size = nblocks;		/* NB: using 64bit number here */
	size <<= block_bits;	/* convert fsb's to byte range */

	XFS_BUF_DATAIO(pb);
	XFS_BUF_ASYNC(pb);
	XFS_BUF_SET_SIZE(pb, size);
	XFS_BUF_SET_COUNT(pb, size);
	XFS_BUF_SET_OFFSET(pb, offset);
	XFS_BUF_SET_FSPRIVATE(pb, LINVFS_GET_VP(inode));
	XFS_BUF_SET_IODONE_FUNC(pb, linvfs_unwritten_convert);

	if (atomic_dec_and_test(&pb->pb_io_remaining) == 1) {
		pagebuf_iodone(pb, 1, 1);
	}

	return 0;
}
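/*
 * Illustrative sketch (an assumption, not part of the original file): the
 * pb_io_remaining handling above is a "bias by one" pattern. The count
 * starts at 1 so that completions arriving while buffers are still being
 * mapped cannot drop it to zero; the final atomic_dec_and_test() removes
 * the bias and fires pagebuf_iodone() exactly once, after all I/O is in.
 * A userspace model of the counting, compiled out with #if 0:
 */
#if 0
#include <assert.h>

static int	io_remaining;
static int	iodone_calls;

static void
demo_iodone(void)
{
	iodone_calls++;
}

static void
demo_complete_one(void)
{
	if (--io_remaining == 0)	/* models atomic_dec_and_test() */
		demo_iodone();
}

int
main(void)
{
	int	i, nblocks = 3;

	io_remaining = 1;		/* the bias: hold off completion */
	io_remaining += nblocks;	/* one count per block submitted */

	for (i = 0; i < nblocks; i++)
		demo_complete_one();	/* in-flight I/O completing */
	assert(iodone_calls == 0);	/* bias still held */

	demo_complete_one();		/* submitter drops the bias */
	assert(iodone_calls == 1);	/* iodone fired exactly once */
	return 0;
}
#endif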
STATIC void
xfs_submit_page(
	struct page		*page,
	struct writeback_control *wbc,
	struct buffer_head	*bh_arr[],
	int			bh_count,
	int			probed_page,
	int			clear_dirty)
{
	struct buffer_head	*bh;
	int			i;

	BUG_ON(PageWriteback(page));
	set_page_writeback(page);
	if (clear_dirty)
		clear_page_dirty(page);
	unlock_page(page);

	if (bh_count) {
		for (i = 0; i < bh_count; i++) {
			bh = bh_arr[i];
			mark_buffer_async_write(bh);
			if (buffer_unwritten(bh))
				set_buffer_unwritten_io(bh);
			set_buffer_uptodate(bh);
			clear_buffer_dirty(bh);
		}

		for (i = 0; i < bh_count; i++)
			submit_bh(WRITE, bh_arr[i]);

		if (probed_page && clear_dirty)
			wbc->nr_to_write--;	/* Wrote an "extra" page */
	} else {
		end_page_writeback(page);
		wbc->pages_skipped++;	/* We didn't write this page */
	}
}
/*
 * Allocate & map buffers for page given the extent map.  Write it out.
 * Except for the original page of a writepage, this is called on
 * delalloc/unwritten pages only; for the original page it is possible
 * that the page has no mapping at all.
 */
STATIC void
xfs_convert_page(
	struct inode		*inode,
	struct page		*page,
	xfs_iomap_t		*iomapp,
	struct writeback_control *wbc,
	void			*private,
	int			startio,
	int			all_bh)
{
	struct buffer_head	*bh_arr[MAX_BUF_PER_PAGE], *bh, *head;
	xfs_iomap_t		*mp = iomapp, *tmp;
	unsigned long		end, offset;
	pgoff_t			end_index;
	int			i = 0, index = 0;
	int			bbits = inode->i_blkbits;

	end_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;
	if (page->index < end_index) {
		end = PAGE_CACHE_SIZE;
	} else {
		end = i_size_read(inode) & (PAGE_CACHE_SIZE-1);
	}
	bh = head = page_buffers(page);
	do {
		offset = i << bbits;
		if (offset >= end)
			break;
		if (!(PageUptodate(page) || buffer_uptodate(bh)))
			continue;
		if (buffer_mapped(bh) && all_bh &&
		    !(buffer_unwritten(bh) || buffer_delay(bh))) {
			if (startio) {
				lock_buffer(bh);
				bh_arr[index++] = bh;
			}
			continue;
		}
		tmp = xfs_offset_to_map(page, mp, offset);
		if (!tmp)
			continue;
		ASSERT(!(tmp->iomap_flags & IOMAP_HOLE));
		ASSERT(!(tmp->iomap_flags & IOMAP_DELAY));

		/* If this is a new unwritten extent buffer (i.e. one
		 * that we haven't passed in private data for), we must
		 * now map this buffer too.
		 */
		if (buffer_unwritten(bh) && !bh->b_end_io) {
			ASSERT(tmp->iomap_flags & IOMAP_UNWRITTEN);
			xfs_map_unwritten(inode, page, head, bh, offset,
					bbits, tmp, wbc, startio, all_bh);
		} else if (! (buffer_unwritten(bh) && buffer_locked(bh))) {
			xfs_map_at_offset(page, bh, offset, bbits, tmp);
			if (buffer_unwritten(bh)) {
				set_buffer_unwritten_io(bh);
				bh->b_private = private;
				ASSERT(private);
			}
		}
		if (startio) {
			bh_arr[index++] = bh;
		} else {
			set_buffer_dirty(bh);
			unlock_buffer(bh);
			mark_buffer_dirty(bh);
		}
	} while (i++, (bh = bh->b_this_page) != head);

	if (startio) {
		xfs_submit_page(page, wbc, bh_arr, index, 1, index == i);
	} else {
		unlock_page(page);
	}
}
/*
 * Convert & write out a cluster of pages in the same extent as defined
 * by mp and following the start page.
 */
STATIC void
xfs_cluster_write(
	struct inode		*inode,
	pgoff_t			tindex,
	xfs_iomap_t		*iomapp,
	struct writeback_control *wbc,
	int			startio,
	int			all_bh,
	pgoff_t			tlast)
{
	struct page		*page;

	for (; tindex <= tlast; tindex++) {
		page = xfs_probe_delalloc_page(inode, tindex);
		if (!page)
			break;
		xfs_convert_page(inode, page, iomapp, wbc, NULL,
				startio, all_bh);
	}
}
/*
 * Calling this without startio set means we are being asked to make a dirty
 * page ready for freeing its buffers.  When called with startio set then
 * we are coming from writepage.
 *
 * When called with startio set it is important that we write the WHOLE
 * page if possible.
 * The bh->b_state's cannot know if any of the blocks or which block for
 * that matter are dirty due to mmap writes, and therefore bh uptodate is
 * only valid if the page itself isn't completely uptodate.  Some layers
 * may clear the page dirty flag prior to calling write page, under the
 * assumption the entire page will be written out; by not writing out the
 * whole page the page can be reused before all valid dirty data is
 * written out.  Note: in the case of a page that has been dirtied by
 * mapwrite but only partially set up by block_prepare_write, the
 * bh->b_state's will not agree and only the ones set up by BPW/BCW will
 * have valid state; thus the whole page must be written out.
 */
STATIC int
xfs_page_state_convert(
	struct inode	*inode,
	struct page	*page,
	struct writeback_control *wbc,
	int		startio,
	int		unmapped) /* also implies page uptodate */
{
	struct buffer_head	*bh_arr[MAX_BUF_PER_PAGE], *bh, *head;
	xfs_iomap_t		*iomp, iomap;
	loff_t			offset;
	unsigned long		p_offset = 0;
	__uint64_t		end_offset;
	pgoff_t			end_index, last_index, tlast;
	int			len, err, i, cnt = 0, uptodate = 1;
	int			flags = startio ? 0 : BMAPI_TRYLOCK;
	int			page_dirty = 1;
	int			delalloc = 0;

	/* Are we off the end of the file ? */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_CACHE_SHIFT;
	last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
	if (page->index >= end_index) {
		if ((page->index >= end_index + 1) ||
		    !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
			err = -EIO;
			goto error;
		}
	}

	offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	end_offset = min_t(unsigned long long,
			offset + PAGE_CACHE_SIZE, i_size_read(inode));

	bh = head = page_buffers(page);
	iomp = NULL;

	len = bh->b_size;
	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio)
			continue;

		if (iomp)
			iomp = xfs_offset_to_map(page, &iomap, p_offset);

		/*
		 * First case, map an unwritten extent and prepare for
		 * extent state conversion transaction on completion.
		 */
		if (buffer_unwritten(bh)) {
			if (!iomp) {
				err = xfs_map_blocks(inode, offset, len, &iomap,
						BMAPI_READ|BMAPI_IGNSTATE);
				if (err)
					goto error;
				iomp = xfs_offset_to_map(page, &iomap,
								p_offset);
			}
			if (iomp) {
				if (!bh->b_end_io) {
					err = xfs_map_unwritten(inode, page,
							head, bh, p_offset,
							inode->i_blkbits, iomp,
							wbc, startio, unmapped);
					if (err)
						goto error;
				} else {
					set_bit(BH_Lock, &bh->b_state);
				}
				BUG_ON(!buffer_locked(bh));
				bh_arr[cnt++] = bh;
				page_dirty = 0;
			}
		/*
		 * Second case, allocate space for a delalloc buffer.
		 * We can return EAGAIN here in the release page case.
		 */
		} else if (buffer_delay(bh)) {
			if (!iomp) {
				delalloc = 1;
				err = xfs_map_blocks(inode, offset, len, &iomap,
						BMAPI_ALLOCATE | flags);
				if (err)
					goto error;
				iomp = xfs_offset_to_map(page, &iomap,
								p_offset);
			}
			if (iomp) {
				xfs_map_at_offset(page, bh, p_offset,
						inode->i_blkbits, iomp);
				if (startio) {
					bh_arr[cnt++] = bh;
				} else {
					set_buffer_dirty(bh);
					unlock_buffer(bh);
					mark_buffer_dirty(bh);
				}
				page_dirty = 0;
			}
		} else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
			   (unmapped || startio)) {
			if (!buffer_mapped(bh)) {
				int	size;

				/*
				 * Getting here implies an unmapped buffer
				 * was found, and we are in a path where we
				 * need to write the whole page out.
				 */
				if (!iomp) {
					size = xfs_probe_unmapped_cluster(
							inode, page, bh, head);
					err = xfs_map_blocks(inode, offset,
							size, &iomap,
							BMAPI_WRITE|BMAPI_MMAP);
					if (err)
						goto error;
					iomp = xfs_offset_to_map(page, &iomap,
								p_offset);
				}
				if (iomp) {
					xfs_map_at_offset(page,
							bh, p_offset,
							inode->i_blkbits, iomp);
					if (startio) {
						bh_arr[cnt++] = bh;
					} else {
						set_buffer_dirty(bh);
						unlock_buffer(bh);
						mark_buffer_dirty(bh);
					}
					page_dirty = 0;
				}
			} else if (startio) {
				if (buffer_uptodate(bh) &&
				    !test_and_set_bit(BH_Lock, &bh->b_state)) {
					bh_arr[cnt++] = bh;
					page_dirty = 0;
				}
			}
		}
	} while (offset += len, p_offset += len,
		((bh = bh->b_this_page) != head));

	if (uptodate && bh == head)
		SetPageUptodate(page);

	if (startio)
		xfs_submit_page(page, wbc, bh_arr, cnt, 0, 1);

	if (iomp) {
		tlast = (iomp->iomap_offset + iomp->iomap_bsize - 1) >>
					PAGE_CACHE_SHIFT;
		if (delalloc && (tlast > last_index))
			tlast = last_index;
		xfs_cluster_write(inode, page->index + 1, iomp, wbc,
					startio, unmapped, tlast);
	}

	return page_dirty;

error:
	for (i = 0; i < cnt; i++) {
		unlock_buffer(bh_arr[i]);
	}

	/*
	 * If it's delalloc and we have nowhere to put it,
	 * throw it away, unless the lower layers told
	 * us to try again.
	 */
	if (err != -EAGAIN) {
		if (!unmapped)
			block_invalidatepage(page, 0);
		ClearPageUptodate(page);
	}
	return err;
}
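/*
 * Illustrative sketch (an assumption, not part of the original file): the
 * big loop above picks one action per buffer, in priority order. A compact
 * userspace restatement of that decision, compiled out with #if 0:
 */
#if 0
#include <assert.h>

enum demo_action {
	DEMO_CONVERT_UNWRITTEN,	/* map + queue extent-state conversion */
	DEMO_ALLOC_DELALLOC,	/* allocate real space for delalloc */
	DEMO_MAP_UNMAPPED,	/* allocate + map an unmapped buffer */
	DEMO_WRITE_MAPPED,	/* already mapped: just write it */
};

static enum demo_action
demo_classify(int unwritten, int delay, int mapped)
{
	if (unwritten)
		return DEMO_CONVERT_UNWRITTEN;	/* first case above */
	if (delay)
		return DEMO_ALLOC_DELALLOC;	/* second case above */
	if (!mapped)
		return DEMO_MAP_UNMAPPED;	/* unmapped-cluster case */
	return DEMO_WRITE_MAPPED;		/* startio case */
}

int
main(void)
{
	assert(demo_classify(1, 0, 0) == DEMO_CONVERT_UNWRITTEN);
	assert(demo_classify(0, 1, 0) == DEMO_ALLOC_DELALLOC);
	assert(demo_classify(0, 0, 0) == DEMO_MAP_UNMAPPED);
	assert(demo_classify(0, 0, 1) == DEMO_WRITE_MAPPED);
	return 0;
}
#endif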
STATIC int
linvfs_get_block_core(
	struct inode		*inode,
	sector_t		iblock,
	unsigned long		blocks,
	struct buffer_head	*bh_result,
	int			create,
	int			direct,
	bmapi_flags_t		flags)
{
	vnode_t			*vp = LINVFS_GET_VP(inode);
	xfs_iomap_t		iomap;
	int			retpbbm = 1;
	int			error;
	ssize_t			size;
	loff_t			offset = (loff_t)iblock << inode->i_blkbits;

	if (blocks)
		size = blocks << inode->i_blkbits;
	else
		size = 1 << inode->i_blkbits;

	VOP_BMAP(vp, offset, size,
		create ? flags : BMAPI_READ, &iomap, &retpbbm, error);
	if (error)
		return -error;

	if (retpbbm == 0)
		return 0;

	if (iomap.iomap_bn != IOMAP_DADDR_NULL) {
		xfs_daddr_t	bn;
		loff_t		delta;

		/* For unwritten extents do not report a disk address on
		 * the read case (treat as if we're reading into a hole).
		 */
		if (create || !(iomap.iomap_flags & IOMAP_UNWRITTEN)) {
			delta = offset - iomap.iomap_offset;
			delta >>= inode->i_blkbits;

			bn = iomap.iomap_bn >> (inode->i_blkbits - BBSHIFT);
			bn += delta;
			BUG_ON(!bn && !(iomap.iomap_flags & IOMAP_REALTIME));
			bh_result->b_blocknr = bn;
			set_buffer_mapped(bh_result);
		}
		if (create && (iomap.iomap_flags & IOMAP_UNWRITTEN)) {
			if (direct)
				bh_result->b_private = inode;
			set_buffer_unwritten(bh_result);
			set_buffer_delay(bh_result);
		}
	}

	/* If this is a realtime file, data might be on a new device */
	bh_result->b_bdev = iomap.iomap_target->pbr_bdev;

	/* If we previously allocated a block out beyond eof and
	 * we are now coming back to use it then we will need to
	 * flag it as new even if it has a disk address.
	 */
	if (create &&
	    ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
	     (offset >= i_size_read(inode)) || (iomap.iomap_flags & IOMAP_NEW))) {
		set_buffer_new(bh_result);
	}

	if (iomap.iomap_flags & IOMAP_DELAY) {
		BUG_ON(direct);
		if (create) {
			set_buffer_mapped(bh_result);
			set_buffer_uptodate(bh_result);
		}
		set_buffer_delay(bh_result);
	}

	if (blocks) {
		bh_result->b_size = (ssize_t)min(
			(loff_t)(iomap.iomap_bsize - iomap.iomap_delta),
			(loff_t)(blocks << inode->i_blkbits));
	}

	return 0;
}
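/*
 * Worked example (an assumption, illustrative only, not from the original
 * file): the b_size clamp above reports at most the remaining bytes of
 * the mapping, even when the caller asked for more. With a 32k mapping
 * whose delta (offset into the mapping) is 8k, a 64k request is trimmed
 * to the 24k the mapping can still satisfy:
 */
#if 0
#include <assert.h>
#include <stdint.h>

static int64_t
demo_min64(int64_t a, int64_t b)
{
	return a < b ? a : b;
}

int
main(void)
{
	int	blkbits = 12;			/* 4k blocks */
	int64_t	iomap_bsize = 32768;		/* mapping covers 32k */
	int64_t	iomap_delta = 8192;		/* request starts 8k in */
	int64_t	blocks = 16;			/* caller asked for 64k */
	int64_t	b_size;

	b_size = demo_min64(iomap_bsize - iomap_delta, blocks << blkbits);
	assert(b_size == 24576);		/* clamped to 24k */
	return 0;
}
#endif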
int
linvfs_get_block(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return linvfs_get_block_core(inode, iblock, 0, bh_result,
					create, 0, BMAPI_WRITE);
}
STATIC int
linvfs_get_blocks_direct(
	struct inode		*inode,
	sector_t		iblock,
	unsigned long		max_blocks,
	struct buffer_head	*bh_result,
	int			create)
{
	return linvfs_get_block_core(inode, iblock, max_blocks, bh_result,
					create, 1, BMAPI_WRITE|BMAPI_DIRECT);
}
STATIC ssize_t
linvfs_direct_IO(
	int			rw,
	struct kiocb		*iocb,
	const struct iovec	*iov,
	loff_t			offset,
	unsigned long		nr_segs)
{
	struct file	*file = iocb->ki_filp;
	struct inode	*inode = file->f_mapping->host;
	vnode_t		*vp = LINVFS_GET_VP(inode);
	xfs_iomap_t	iomap;
	int		maps = 1;
	int		error;

	VOP_BMAP(vp, offset, 0, BMAPI_DEVICE, &iomap, &maps, error);
	if (error)
		return -error;

	return blockdev_direct_IO_own_locking(rw, iocb, inode,
		iomap.iomap_target->pbr_bdev,
		iov, offset, nr_segs,
		linvfs_get_blocks_direct,
		linvfs_unwritten_convert_direct);
}
STATIC sector_t
linvfs_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct inode		*inode = (struct inode *)mapping->host;
	vnode_t			*vp = LINVFS_GET_VP(inode);
	int			error;

	vn_trace_entry(vp, "linvfs_bmap", (inst_t *)__return_address);

	VOP_RWLOCK(vp, VRWLOCK_READ);
	VOP_FLUSH_PAGES(vp, (xfs_off_t)0, -1, 0, FI_REMAPF, error);
	VOP_RWUNLOCK(vp, VRWLOCK_READ);
	return generic_block_bmap(mapping, block, linvfs_get_block);
}
STATIC int
linvfs_readpage(
	struct file		*unused,
	struct page		*page)
{
	return mpage_readpage(page, linvfs_get_block);
}

STATIC int
linvfs_readpages(
	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	unsigned		nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, linvfs_get_block);
}
STATIC void
xfs_count_page_state(
	struct page		*page,
	int			*delalloc,
	int			*unmapped,
	int			*unwritten)
{
	struct buffer_head	*bh, *head;

	*delalloc = *unmapped = *unwritten = 0;
	bh = head = page_buffers(page);

	do {
		if (buffer_uptodate(bh) && !buffer_mapped(bh))
			(*unmapped) = 1;
		else if (buffer_unwritten(bh) && !buffer_delay(bh))
			clear_buffer_unwritten(bh);
		else if (buffer_unwritten(bh))
			(*unwritten) = 1;
		else if (buffer_delay(bh))
			(*delalloc) = 1;
	} while ((bh = bh->b_this_page) != head);
}
/*
 * writepage: Called from one of two places:
 *
 * 1. we are flushing a delalloc buffer head.
 *
 * 2. we are writing out a dirty page. Typically the page dirty
 *    state is cleared before we get here. In this case it is
 *    conceivable we have no buffer heads.
 *
 * For delalloc space on the page we need to allocate space and
 * flush it. For unmapped buffer heads on the page we should
 * allocate space if the page is uptodate. For any other dirty
 * buffer heads on the page we should flush them.
 *
 * If we detect that a transaction would be required to flush
 * the page, we have to check the process flags first, if we
 * are already in a transaction or disk I/O during allocations
 * is off, we need to fail the writepage and redirty the page.
 */
STATIC int
linvfs_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	int			error;
	int			need_trans;
	int			delalloc, unmapped, unwritten;
	struct inode		*inode = page->mapping->host;

	xfs_page_trace(XFS_WRITEPAGE_ENTER, inode, page, 0);

	/*
	 * We need a transaction if:
	 *  1. There are delalloc buffers on the page
	 *  2. The page is uptodate and we have unmapped buffers
	 *  3. The page is uptodate and we have no buffers
	 *  4. There are unwritten buffers on the page
	 */
	if (!page_has_buffers(page)) {
		unmapped = 1;
		need_trans = 1;
	} else {
		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
		if (!PageUptodate(page))
			unmapped = 0;
		need_trans = delalloc + unmapped + unwritten;
	}

	/*
	 * If we need a transaction and the process flags say
	 * we are already in a transaction, or no IO is allowed
	 * then mark the page dirty again and leave the page
	 * as is.
	 */
	if (PFLAGS_TEST_FSTRANS() && need_trans)
		goto out_fail;

	/*
	 * Delay hooking up buffer heads until we have
	 * made our go/no-go decision.
	 */
	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << inode->i_blkbits, 0);

	/*
	 * Convert delayed allocate, unwritten or unmapped space
	 * to real space and flush out to disk.
	 */
	error = xfs_page_state_convert(inode, page, wbc, 1, unmapped);
	if (error == -EAGAIN)
		goto out_fail;
	if (unlikely(error < 0))
		goto out_unlock;

	return 0;

out_fail:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
out_unlock:
	unlock_page(page);
	return error;
}
/*
 * Called to move a page into cleanable state - and from there
 * to be released. Possibly the page is already clean. We always
 * have buffer heads in this call.
 *
 * Returns non-zero if the page is ok to release, zero otherwise.
 *
 * Possible scenarios are:
 *
 * 1. We are being called to release a page which has been written
 *    to via regular I/O. buffer heads will be dirty and possibly
 *    delalloc. If no delalloc buffer heads in this case then we
 *    can just return zero.
 *
 * 2. We are called to release a page which has been written via
 *    mmap, all we need to do is ensure there is no delalloc
 *    state in the buffer heads, if not we can let the caller
 *    free them and we should come back later via writepage.
 */
STATIC int
linvfs_release_page(
	struct page		*page,
	int			gfp_mask)
{
	struct inode		*inode = page->mapping->host;
	int			dirty, delalloc, unmapped, unwritten;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 1,
	};

	xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, gfp_mask);

	xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
	if (!delalloc && !unwritten)
		goto free_buffers;

	if (!(gfp_mask & __GFP_FS))
		return 0;

	/* If we are already inside a transaction or the thread cannot
	 * do I/O, we cannot release this page.
	 */
	if (PFLAGS_TEST_FSTRANS())
		return 0;

	/*
	 * Convert delalloc space to real space, do not flush the
	 * data out to disk, that will be done by the caller.
	 * Never need to allocate space here - we will always
	 * come back to writepage in that case.
	 */
	dirty = xfs_page_state_convert(inode, page, &wbc, 0, 0);
	if (dirty == 0 && !unwritten)
		goto free_buffers;
	return 0;

free_buffers:
	return try_to_free_buffers(page);
}
STATIC int
linvfs_prepare_write(
	struct file		*file,
	struct page		*page,
	unsigned int		from,
	unsigned int		to)
{
	return block_prepare_write(page, from, to, linvfs_get_block);
}

struct address_space_operations linvfs_aops = {
	.readpage		= linvfs_readpage,
	.readpages		= linvfs_readpages,
	.writepage		= linvfs_writepage,
	.sync_page		= block_sync_page,
	.releasepage		= linvfs_release_page,
	.prepare_write		= linvfs_prepare_write,
	.commit_write		= generic_commit_write,
	.bmap			= linvfs_bmap,
	.direct_IO		= linvfs_direct_IO,
};