/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/cleancache.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>
static void f2fs_read_end_io(struct bio *bio, int err)
{
        struct bio_vec *bvec;
        int i;

        if (f2fs_bio_encrypted(bio)) {
                if (err) {
                        f2fs_release_crypto_ctx(bio->bi_private);
                } else {
                        f2fs_end_io_crypto_work(bio->bi_private, bio);
                        return;
                }
        }

        bio_for_each_segment_all(bvec, bio, i) {
                struct page *page = bvec->bv_page;

                if (!err) {
                        SetPageUptodate(page);
                } else {
                        ClearPageUptodate(page);
                        SetPageError(page);
                }
                unlock_page(page);
        }
        bio_put(bio);
}
static void f2fs_write_end_io(struct bio *bio, int err)
{
        struct f2fs_sb_info *sbi = bio->bi_private;
        struct bio_vec *bvec;
        int i;

        bio_for_each_segment_all(bvec, bio, i) {
                struct page *page = bvec->bv_page;

                f2fs_restore_and_release_control_page(&page);

                if (unlikely(err)) {
                        set_page_dirty(page);
                        set_bit(AS_EIO, &page->mapping->flags);
                        f2fs_stop_checkpoint(sbi);
                }
                end_page_writeback(page);
                dec_page_count(sbi, F2FS_WRITEBACK);
        }

        if (!get_pages(sbi, F2FS_WRITEBACK) &&
                        !list_empty(&sbi->cp_wait.task_list))
                wake_up(&sbi->cp_wait);

        bio_put(bio);
}
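/*
 * The wake_up() above pairs with the checkpoint path, which sleeps on
 * sbi->cp_wait until the F2FS_WRITEBACK page count drains to zero; on an
 * IO error the mapping is flagged with AS_EIO and further checkpointing
 * is stopped instead.
 */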
/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
                                int npages, bool is_read)
{
        struct bio *bio;

        /* No failure on bio allocation */
        bio = bio_alloc(GFP_NOIO, npages);

        bio->bi_bdev = sbi->sb->s_bdev;
        bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
        bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
        bio->bi_private = is_read ? NULL : sbi;

        return bio;
}
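/*
 * A hypothetical single-page read through this helper (sketch only; real
 * callers go through f2fs_submit_page_bio() below):
 *
 *	struct bio *bio = __bio_alloc(sbi, blkaddr, 1, true);
 *
 *	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE)
 *		return -EFAULT;
 *	submit_bio(READ_SYNC, bio);	- completion runs f2fs_read_end_io()
 */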
static void __submit_merged_bio(struct f2fs_bio_info *io)
{
        struct f2fs_io_info *fio = &io->fio;

        if (!io->bio)
                return;

        if (is_read_io(fio->rw))
                trace_f2fs_submit_read_bio(io->sbi->sb, fio, io->bio);
        else
                trace_f2fs_submit_write_bio(io->sbi->sb, fio, io->bio);

        submit_bio(fio->rw, io->bio);
        io->bio = NULL;
}
void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
                                enum page_type type, int rw)
{
        enum page_type btype = PAGE_TYPE_OF_BIO(type);
        struct f2fs_bio_info *io;

        io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];

        down_write(&io->io_rwsem);

        /* change META to META_FLUSH in the checkpoint procedure */
        if (type >= META_FLUSH) {
                io->fio.type = META_FLUSH;
                if (test_opt(sbi, NOBARRIER))
                        io->fio.rw = WRITE_FLUSH | REQ_META | REQ_PRIO;
                else
                        io->fio.rw = WRITE_FLUSH_FUA | REQ_META | REQ_PRIO;
        }
        __submit_merged_bio(io);
        up_write(&io->io_rwsem);
}
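/*
 * Typical call site (sketch): the data writeback path flushes its pending
 * merged bio with
 *
 *	f2fs_submit_merged_bio(sbi, DATA, WRITE);
 *
 * while the checkpoint passes META_FLUSH, so the pending bio is upgraded
 * above to a flush/FUA request (plain flush when "nobarrier" is set).
 */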
/*
 * Fill the locked page with data located in the block address.
 * Return unlocked page.
 */
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
        struct bio *bio;
        struct page *page = fio->encrypted_page ?
                        fio->encrypted_page : fio->page;

        trace_f2fs_submit_page_bio(page, fio);
        f2fs_trace_ios(fio, 0);

        /* Allocate a new bio */
        bio = __bio_alloc(fio->sbi, fio->blk_addr, 1, is_read_io(fio->rw));

        if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
                bio_put(bio);
                f2fs_put_page(page, 1);
                return -EFAULT;
        }

        submit_bio(fio->rw, bio);
        return 0;
}
void f2fs_submit_page_mbio(struct f2fs_io_info *fio)
{
        struct f2fs_sb_info *sbi = fio->sbi;
        enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
        struct f2fs_bio_info *io;
        bool is_read = is_read_io(fio->rw);
        struct page *bio_page;

        io = is_read ? &sbi->read_io : &sbi->write_io[btype];

        verify_block_addr(sbi, fio->blk_addr);

        down_write(&io->io_rwsem);

        if (!is_read)
                inc_page_count(sbi, F2FS_WRITEBACK);

        if (io->bio && (io->last_block_in_bio != fio->blk_addr - 1 ||
                                                io->fio.rw != fio->rw))
                __submit_merged_bio(io);
alloc_new:
        if (io->bio == NULL) {
                int bio_blocks = MAX_BIO_BLOCKS(sbi);

                io->bio = __bio_alloc(sbi, fio->blk_addr, bio_blocks, is_read);
                io->fio = *fio;
        }

        bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;

        if (bio_add_page(io->bio, bio_page, PAGE_CACHE_SIZE, 0) <
                                                        PAGE_CACHE_SIZE) {
                __submit_merged_bio(io);
                goto alloc_new;
        }

        io->last_block_in_bio = fio->blk_addr;
        f2fs_trace_ios(fio, 0);

        up_write(&io->io_rwsem);
        trace_f2fs_submit_page_mbio(fio->page, fio);
}
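/*
 * Merging rule used above: a page may join the in-flight bio only if it is
 * physically contiguous with the last block added (blk_addr - 1) and uses
 * the same rw flags; anything else forces the pending bio out first, and a
 * full bio is resubmitted through the alloc_new loop.
 */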
/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
void set_data_blkaddr(struct dnode_of_data *dn)
{
        struct f2fs_node *rn;
        __le32 *addr_array;
        struct page *node_page = dn->node_page;
        unsigned int ofs_in_node = dn->ofs_in_node;

        f2fs_wait_on_page_writeback(node_page, NODE);

        rn = F2FS_NODE(node_page);

        /* Get physical address of data block */
        addr_array = blkaddr_in_node(rn);
        addr_array[ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
        set_page_dirty(node_page);
}
int reserve_new_block(struct dnode_of_data *dn)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);

        if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
                return -EPERM;
        if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
                return -ENOSPC;

        trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);

        dn->data_blkaddr = NEW_ADDR;
        set_data_blkaddr(dn);
        mark_inode_dirty(dn->inode);
        sync_inode_page(dn);
        return 0;
}
int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
        bool need_put = dn->inode_page ? false : true;
        int err;

        err = get_dnode_of_data(dn, index, ALLOC_NODE);
        if (err)
                return err;

        if (dn->data_blkaddr == NULL_ADDR)
                err = reserve_new_block(dn);
        if (err || need_put)
                f2fs_put_dnode(dn);
        return err;
}
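/*
 * Hypothetical caller (cf. f2fs_write_begin() below): reserve a block for
 * page 'index' while already holding the locked inode page:
 *
 *	struct dnode_of_data dn;
 *
 *	set_new_dnode(&dn, inode, ipage, ipage, 0);
 *	err = f2fs_reserve_block(&dn, index);
 */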
struct page *get_read_data_page(struct inode *inode, pgoff_t index, int rw)
{
        struct address_space *mapping = inode->i_mapping;
        struct dnode_of_data dn;
        struct page *page;
        struct extent_info ei;
        int err;
        struct f2fs_io_info fio = {
                .sbi = F2FS_I_SB(inode),
                .type = DATA,
                .rw = rw,
                .encrypted_page = NULL,
        };

        if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
                return read_mapping_page(mapping, index, NULL);

        page = grab_cache_page(mapping, index);
        if (!page)
                return ERR_PTR(-ENOMEM);

        if (f2fs_lookup_extent_cache(inode, index, &ei)) {
                dn.data_blkaddr = ei.blk + index - ei.fofs;
                goto got_it;
        }

        set_new_dnode(&dn, inode, NULL, NULL, 0);
        err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
        if (err)
                goto put_err;
        f2fs_put_dnode(&dn);

        if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
                err = -ENOENT;
                goto put_err;
        }
got_it:
        if (PageUptodate(page)) {
                unlock_page(page);
                return page;
        }

        /*
         * A new dentry page is allocated but not able to be written, since its
         * new inode page couldn't be allocated due to -ENOSPC.
         * In such the case, its blkaddr can be remained as NEW_ADDR.
         * see, f2fs_add_link -> get_new_data_page -> init_inode_metadata.
         */
        if (dn.data_blkaddr == NEW_ADDR) {
                zero_user_segment(page, 0, PAGE_CACHE_SIZE);
                SetPageUptodate(page);
                unlock_page(page);
                return page;
        }

        fio.blk_addr = dn.data_blkaddr;
        fio.page = page;
        err = f2fs_submit_page_bio(&fio);
        if (err)
                goto put_err;
        return page;

put_err:
        f2fs_put_page(page, 1);
        return ERR_PTR(err);
}
struct page *find_data_page(struct inode *inode, pgoff_t index)
{
        struct address_space *mapping = inode->i_mapping;
        struct page *page;

        page = find_get_page(mapping, index);
        if (page && PageUptodate(page))
                return page;
        f2fs_put_page(page, 0);

        page = get_read_data_page(inode, index, READ_SYNC);
        if (IS_ERR(page))
                return page;

        if (PageUptodate(page))
                return page;

        wait_on_page_locked(page);
        if (unlikely(!PageUptodate(page))) {
                f2fs_put_page(page, 0);
                return ERR_PTR(-EIO);
        }
        return page;
}
/*
 * If it tries to access a hole, return an error.
 * Because the callers (functions in dir.c and GC) should be able to know
 * whether this page exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
{
        struct address_space *mapping = inode->i_mapping;
        struct page *page;
repeat:
        page = get_read_data_page(inode, index, READ_SYNC);
        if (IS_ERR(page))
                return page;

        /* wait for read completion */
        lock_page(page);
        if (unlikely(!PageUptodate(page))) {
                f2fs_put_page(page, 1);
                return ERR_PTR(-EIO);
        }
        if (unlikely(page->mapping != mapping)) {
                f2fs_put_page(page, 1);
                goto repeat;
        }
        return page;
}
/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 * Note that ipage is set only by make_empty_dir.
 */
struct page *get_new_data_page(struct inode *inode,
                struct page *ipage, pgoff_t index, bool new_i_size)
{
        struct address_space *mapping = inode->i_mapping;
        struct page *page;
        struct dnode_of_data dn;
        int err;
repeat:
        page = grab_cache_page(mapping, index);
        if (!page)
                return ERR_PTR(-ENOMEM);

        set_new_dnode(&dn, inode, ipage, NULL, 0);
        err = f2fs_reserve_block(&dn, index);
        if (err) {
                f2fs_put_page(page, 1);
                return ERR_PTR(err);
        }
        if (!ipage)
                f2fs_put_dnode(&dn);

        if (PageUptodate(page))
                goto got_it;

        if (dn.data_blkaddr == NEW_ADDR) {
                zero_user_segment(page, 0, PAGE_CACHE_SIZE);
                SetPageUptodate(page);
        } else {
                f2fs_put_page(page, 1);

                page = get_read_data_page(inode, index, READ_SYNC);
                if (IS_ERR(page))
                        goto repeat;

                /* wait for read completion */
                lock_page(page);
        }
got_it:
        if (new_i_size &&
                i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) {
                i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT));
                /* Only the directory inode sets new_i_size */
                set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
        }
        return page;
}
static int __allocate_data_block(struct dnode_of_data *dn)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
        struct f2fs_inode_info *fi = F2FS_I(dn->inode);
        struct f2fs_summary sum;
        struct node_info ni;
        int seg = CURSEG_WARM_DATA;
        pgoff_t fofs;

        if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
                return -EPERM;

        dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
        if (dn->data_blkaddr == NEW_ADDR)
                goto alloc;

        if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
                return -ENOSPC;

alloc:
        get_node_info(sbi, dn->nid, &ni);
        set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

        if (dn->ofs_in_node == 0 && dn->inode_page == dn->node_page)
                seg = CURSEG_DIRECT_IO;

        allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
                                                                &sum, seg);
        set_data_blkaddr(dn);

        /* update i_size */
        fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
                                                        dn->ofs_in_node;
        if (i_size_read(dn->inode) < ((fofs + 1) << PAGE_CACHE_SHIFT))
                i_size_write(dn->inode, ((fofs + 1) << PAGE_CACHE_SHIFT));

        /* direct IO doesn't use extent cache to maximize the performance */
        f2fs_drop_largest_extent(dn->inode, fofs);

        return 0;
}
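/*
 * i_size update above, concretely: with 4KB pages (PAGE_CACHE_SHIFT == 12),
 * allocating the block at file offset index fofs == 3 pushes i_size to at
 * least (3 + 1) << 12 == 16384 bytes, i.e. the end of that block.
 */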
static void __allocate_data_blocks(struct inode *inode, loff_t offset,
                                                        size_t count)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct dnode_of_data dn;
        u64 start = F2FS_BYTES_TO_BLK(offset);
        u64 len = F2FS_BYTES_TO_BLK(count);
        bool allocated;
        u64 end_offset;

        while (len) {
                f2fs_balance_fs(sbi);
                f2fs_lock_op(sbi);

                /* When reading holes, we need its node page */
                set_new_dnode(&dn, inode, NULL, NULL, 0);
                if (get_dnode_of_data(&dn, start, ALLOC_NODE))
                        goto out;

                allocated = false;
                end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));

                while (dn.ofs_in_node < end_offset && len) {
                        block_t blkaddr;

                        blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
                        if (blkaddr == NULL_ADDR || blkaddr == NEW_ADDR) {
                                if (__allocate_data_block(&dn))
                                        goto sync_out;
                                allocated = true;
                        }
                        len--;
                        start++;
                        dn.ofs_in_node++;
                }

                if (allocated)
                        sync_inode_page(&dn);

                f2fs_put_dnode(&dn);
                f2fs_unlock_op(sbi);
        }
        return;

sync_out:
        if (allocated)
                sync_inode_page(&dn);
        f2fs_put_dnode(&dn);
out:
        f2fs_unlock_op(sbi);
        return;
}
/*
 * f2fs_map_blocks() now supports readahead/bmap/rw direct_IO with the
 * f2fs_map_blocks structure.
 * If original data blocks are allocated, then give them to blockdev.
 * Otherwise,
 *     a. preallocate requested block addresses
 *     b. do not use extent cache for better performance
 *     c. give the block addresses to blockdev
 */
static int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
                                                int create, bool fiemap)
{
        unsigned int maxblocks = map->m_len;
        struct dnode_of_data dn;
        int mode = create ? ALLOC_NODE : LOOKUP_NODE_RA;
        pgoff_t pgofs, end_offset;
        int err = 0, ofs = 1;
        struct extent_info ei;
        bool allocated = false;

        map->m_len = 0;
        map->m_flags = 0;

        /* it only supports block size == page size */
        pgofs = (pgoff_t)map->m_lblk;

        if (f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
                map->m_pblk = ei.blk + pgofs - ei.fofs;
                map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
                map->m_flags = F2FS_MAP_MAPPED;
                goto out;
        }

        if (create)
                f2fs_lock_op(F2FS_I_SB(inode));

        /* When reading holes, we need its node page */
        set_new_dnode(&dn, inode, NULL, NULL, 0);
        err = get_dnode_of_data(&dn, pgofs, mode);
        if (err) {
                if (err == -ENOENT)
                        err = 0;
                goto unlock_out;
        }
        if (dn.data_blkaddr == NEW_ADDR && !fiemap)
                goto put_out;

        if (dn.data_blkaddr != NULL_ADDR) {
                map->m_flags = F2FS_MAP_MAPPED;
                map->m_pblk = dn.data_blkaddr;
                if (dn.data_blkaddr == NEW_ADDR)
                        map->m_flags |= F2FS_MAP_UNWRITTEN;
        } else if (create) {
                err = __allocate_data_block(&dn);
                if (err)
                        goto put_out;
                allocated = true;
                map->m_flags = F2FS_MAP_NEW | F2FS_MAP_MAPPED;
                map->m_pblk = dn.data_blkaddr;
        } else {
                goto put_out;
        }

        end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
        map->m_len = 1;
        dn.ofs_in_node++;
        pgofs++;

get_next:
        if (dn.ofs_in_node >= end_offset) {
                if (allocated)
                        sync_inode_page(&dn);
                allocated = false;
                f2fs_put_dnode(&dn);

                set_new_dnode(&dn, inode, NULL, NULL, 0);
                err = get_dnode_of_data(&dn, pgofs, mode);
                if (err) {
                        if (err == -ENOENT)
                                err = 0;
                        goto unlock_out;
                }
                if (dn.data_blkaddr == NEW_ADDR && !fiemap)
                        goto put_out;

                end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
        }

        if (maxblocks > map->m_len) {
                block_t blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);

                if (blkaddr == NULL_ADDR && create) {
                        err = __allocate_data_block(&dn);
                        if (err)
                                goto sync_out;
                        allocated = true;
                        map->m_flags |= F2FS_MAP_NEW;
                        blkaddr = dn.data_blkaddr;
                }
                /* Give more consecutive addresses for the readahead */
                if ((map->m_pblk != NEW_ADDR &&
                                blkaddr == (map->m_pblk + ofs)) ||
                                (map->m_pblk == NEW_ADDR &&
                                blkaddr == NEW_ADDR)) {
                        ofs++;
                        dn.ofs_in_node++;
                        pgofs++;
                        map->m_len++;
                        goto get_next;
                }
        }
sync_out:
        if (allocated)
                sync_inode_page(&dn);
put_out:
        f2fs_put_dnode(&dn);
unlock_out:
        if (create)
                f2fs_unlock_op(F2FS_I_SB(inode));
out:
        trace_f2fs_map_blocks(inode, map, err);
        return err;
}
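/*
 * Illustrative use (see __get_data_block() below for the real caller):
 * mapping up to 8 blocks starting at logical block 100 of a file:
 *
 *	struct f2fs_map_blocks map = { .m_lblk = 100, .m_len = 8 };
 *
 *	err = f2fs_map_blocks(inode, &map, 0, false);
 *	- on success, map.m_flags carries the F2FS_MAP_* state and, for a
 *	  mapped extent, blocks map.m_pblk .. map.m_pblk + map.m_len - 1
 *	  back the requested logical range
 */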
static int __get_data_block(struct inode *inode, sector_t iblock,
                        struct buffer_head *bh, int create, bool fiemap)
{
        struct f2fs_map_blocks map;
        int ret;

        map.m_lblk = iblock;
        map.m_len = bh->b_size >> inode->i_blkbits;

        ret = f2fs_map_blocks(inode, &map, create, fiemap);
        if (!ret) {
                map_bh(bh, inode->i_sb, map.m_pblk);
                bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
                bh->b_size = map.m_len << inode->i_blkbits;
        }
        return ret;
}
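/*
 * Unit conversion above: with a 4KB block size, i_blkbits is 12, so a
 * buffer_head with b_size == 32768 requests 32768 >> 12 == 8 blocks, and a
 * mapped length of 8 blocks comes back as b_size = 8 << 12 bytes.
 */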
static int get_data_block(struct inode *inode, sector_t iblock,
                        struct buffer_head *bh_result, int create)
{
        return __get_data_block(inode, iblock, bh_result, create, false);
}

static int get_data_block_fiemap(struct inode *inode, sector_t iblock,
                        struct buffer_head *bh_result, int create)
{
        return __get_data_block(inode, iblock, bh_result, create, true);
}
static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
{
        return (offset >> inode->i_blkbits);
}

static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
{
        return (blk << inode->i_blkbits);
}
int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                u64 start, u64 len)
{
        struct buffer_head map_bh;
        sector_t start_blk, last_blk;
        loff_t isize = i_size_read(inode);
        u64 logical = 0, phys = 0, size = 0;
        u32 flags = 0;
        bool past_eof = false, whole_file = false;
        int ret = 0;

        ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
        if (ret)
                return ret;

        mutex_lock(&inode->i_mutex);

        if (len >= isize) {
                whole_file = true;
                len = isize;
        }

        if (logical_to_blk(inode, len) == 0)
                len = blk_to_logical(inode, 1);

        start_blk = logical_to_blk(inode, start);
        last_blk = logical_to_blk(inode, start + len - 1);
next:
        memset(&map_bh, 0, sizeof(struct buffer_head));
        map_bh.b_size = len;

        ret = get_data_block_fiemap(inode, start_blk, &map_bh, 0);
        if (ret)
                goto out;

        /* HOLE */
        if (!buffer_mapped(&map_bh)) {
                start_blk++;

                if (!past_eof && blk_to_logical(inode, start_blk) >= isize)
                        past_eof = 1;

                if (past_eof && size) {
                        flags |= FIEMAP_EXTENT_LAST;
                        ret = fiemap_fill_next_extent(fieinfo, logical,
                                        phys, size, flags);
                } else if (size) {
                        ret = fiemap_fill_next_extent(fieinfo, logical,
                                        phys, size, flags);
                        size = 0;
                }

                /* if we have holes up to/past EOF then we're done */
                if (start_blk > last_blk || past_eof || ret)
                        goto out;
        } else {
                if (start_blk > last_blk && !whole_file) {
                        ret = fiemap_fill_next_extent(fieinfo, logical,
                                        phys, size, flags);
                        goto out;
                }

                /*
                 * if size != 0 then we know we already have an extent
                 * to add, so add it.
                 */
                if (size) {
                        ret = fiemap_fill_next_extent(fieinfo, logical,
                                        phys, size, flags);
                        if (ret)
                                goto out;
                }

                logical = blk_to_logical(inode, start_blk);
                phys = blk_to_logical(inode, map_bh.b_blocknr);
                size = map_bh.b_size;
                flags = 0;
                if (buffer_unwritten(&map_bh))
                        flags = FIEMAP_EXTENT_UNWRITTEN;

                start_blk += logical_to_blk(inode, size);

                /*
                 * If we are past the EOF, then we need to make sure as
                 * soon as we find a hole that the last extent we found
                 * is marked with FIEMAP_EXTENT_LAST
                 */
                if (!past_eof && logical + size >= isize)
                        past_eof = true;
        }
        cond_resched();
        if (fatal_signal_pending(current))
                ret = -EINTR;
        else
                goto next;
out:
        if (ret == 1)
                ret = 0;

        mutex_unlock(&inode->i_mutex);
        return ret;
}
/*
 * This function was originally taken from fs/mpage.c, and customized for f2fs.
 * The major change is that block_size == page_size in f2fs by default.
 */
static int f2fs_mpage_readpages(struct address_space *mapping,
                        struct list_head *pages, struct page *page,
                        unsigned nr_pages)
{
        struct bio *bio = NULL;
        unsigned page_idx;
        sector_t last_block_in_bio = 0;
        struct inode *inode = mapping->host;
        const unsigned blkbits = inode->i_blkbits;
        const unsigned blocksize = 1 << blkbits;
        sector_t block_in_file;
        sector_t last_block;
        sector_t last_block_in_file;
        sector_t block_nr;
        struct block_device *bdev = inode->i_sb->s_bdev;
        struct f2fs_map_blocks map;

        map.m_pblk = 0;
        map.m_lblk = 0;
        map.m_len = 0;
        map.m_flags = 0;

        for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {

                prefetchw(&page->flags);
                if (pages) {
                        page = list_entry(pages->prev, struct page, lru);
                        list_del(&page->lru);
                        if (add_to_page_cache_lru(page, mapping,
                                                page->index, GFP_KERNEL))
                                goto next_page;
                }

                block_in_file = (sector_t)page->index;
                last_block = block_in_file + nr_pages;
                last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
                                                                blkbits;
                if (last_block > last_block_in_file)
                        last_block = last_block_in_file;

                /*
                 * Map blocks using the previous result first.
                 */
                if ((map.m_flags & F2FS_MAP_MAPPED) &&
                                block_in_file > map.m_lblk &&
                                block_in_file < (map.m_lblk + map.m_len))
                        goto got_it;

                /*
                 * Then do more f2fs_map_blocks() calls until we are
                 * done with this page.
                 */
                map.m_flags = 0;

                if (block_in_file < last_block) {
                        map.m_lblk = block_in_file;
                        map.m_len = last_block - block_in_file;

                        if (f2fs_map_blocks(inode, &map, 0, false))
                                goto set_error_page;
                }
got_it:
                if ((map.m_flags & F2FS_MAP_MAPPED)) {
                        block_nr = map.m_pblk + block_in_file - map.m_lblk;
                        SetPageMappedToDisk(page);

                        if (!PageUptodate(page) && !cleancache_get_page(page)) {
                                SetPageUptodate(page);
                                goto confused;
                        }
                } else {
                        zero_user_segment(page, 0, PAGE_CACHE_SIZE);
                        SetPageUptodate(page);
                        unlock_page(page);
                        goto next_page;
                }

                /*
                 * This page will go to BIO.  Do we need to send this
                 * BIO off first?
                 */
                if (bio && (last_block_in_bio != block_nr - 1)) {
submit_and_realloc:
                        submit_bio(READ, bio);
                        bio = NULL;
                }
                if (bio == NULL) {
                        struct f2fs_crypto_ctx *ctx = NULL;

                        if (f2fs_encrypted_inode(inode) &&
                                        S_ISREG(inode->i_mode)) {
                                struct page *cpage;

                                ctx = f2fs_get_crypto_ctx(inode);
                                if (IS_ERR(ctx))
                                        goto set_error_page;

                                /* wait the page to be moved by cleaning */
                                cpage = find_lock_page(
                                                META_MAPPING(F2FS_I_SB(inode)),
                                                block_nr);
                                if (cpage) {
                                        f2fs_wait_on_page_writeback(cpage,
                                                                        DATA);
                                        f2fs_put_page(cpage, 1);
                                }
                        }

                        bio = bio_alloc(GFP_KERNEL,
                                min_t(int, nr_pages, bio_get_nr_vecs(bdev)));
                        if (!bio) {
                                if (ctx)
                                        f2fs_release_crypto_ctx(ctx);
                                goto set_error_page;
                        }
                        bio->bi_bdev = bdev;
                        bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(block_nr);
                        bio->bi_end_io = f2fs_read_end_io;
                        bio->bi_private = ctx;
                }

                if (bio_add_page(bio, page, blocksize, 0) < blocksize)
                        goto submit_and_realloc;

                last_block_in_bio = block_nr;
                goto next_page;
set_error_page:
                SetPageError(page);
                zero_user_segment(page, 0, PAGE_CACHE_SIZE);
                unlock_page(page);
                goto next_page;
confused:
                if (bio) {
                        submit_bio(READ, bio);
                        bio = NULL;
                }
                unlock_page(page);
next_page:
                if (pages)
                        page_cache_release(page);
        }
        BUG_ON(pages && !list_empty(pages));
        if (bio)
                submit_bio(READ, bio);
        return 0;
}
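/*
 * Note on bio merging above: holes are zeroed and unlocked inline, so only
 * a discontinuity in physical block numbers costs an extra submit_bio().
 * For a fully contiguous file, one readpages() call builds a single large
 * read bio of up to bio_get_nr_vecs(bdev) pages.
 */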
static int f2fs_read_data_page(struct file *file, struct page *page)
{
        struct inode *inode = page->mapping->host;
        int ret = -EAGAIN;

        trace_f2fs_readpage(page, DATA);

        /* If the file has inline data, try to read it directly */
        if (f2fs_has_inline_data(inode))
                ret = f2fs_read_inline_data(inode, page);
        if (ret == -EAGAIN)
                ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1);
        return ret;
}
static int f2fs_read_data_pages(struct file *file,
                        struct address_space *mapping,
                        struct list_head *pages, unsigned nr_pages)
{
        struct inode *inode = file->f_mapping->host;

        /* If the file has inline data, skip readpages */
        if (f2fs_has_inline_data(inode))
                return 0;

        return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages);
}
int do_write_data_page(struct f2fs_io_info *fio)
{
        struct page *page = fio->page;
        struct inode *inode = page->mapping->host;
        struct dnode_of_data dn;
        int err = 0;

        set_new_dnode(&dn, inode, NULL, NULL, 0);
        err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
        if (err)
                return err;

        fio->blk_addr = dn.data_blkaddr;

        /* This page is already truncated */
        if (fio->blk_addr == NULL_ADDR) {
                ClearPageUptodate(page);
                goto out_writepage;
        }

        if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
                fio->encrypted_page = f2fs_encrypt(inode, fio->page);
                if (IS_ERR(fio->encrypted_page)) {
                        err = PTR_ERR(fio->encrypted_page);
                        goto out_writepage;
                }
        }

        set_page_writeback(page);

        /*
         * If current allocation needs SSR,
         * it had better in-place writes for updated data.
         */
        if (unlikely(fio->blk_addr != NEW_ADDR &&
                        !is_cold_data(page) &&
                        need_inplace_update(inode))) {
                rewrite_data_page(fio);
                set_inode_flag(F2FS_I(inode), FI_UPDATE_WRITE);
                trace_f2fs_do_write_data_page(page, IPU);
        } else {
                write_data_page(&dn, fio);
                set_data_blkaddr(&dn);
                f2fs_update_extent_cache(&dn);
                trace_f2fs_do_write_data_page(page, OPU);
                set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
                if (page->index == 0)
                        set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
        }
out_writepage:
        f2fs_put_dnode(&dn);
        return err;
}
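/*
 * Summary of the branch above: an in-place update (IPU) rewrites the
 * existing fio->blk_addr and is preferred when SSR allocation makes
 * overwrites cheap; otherwise an out-of-place update (OPU) allocates a new
 * block address and refreshes the extent cache.
 */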
static int f2fs_write_data_page(struct page *page,
                                        struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        loff_t i_size = i_size_read(inode);
        const pgoff_t end_index = ((unsigned long long) i_size)
                                                        >> PAGE_CACHE_SHIFT;
        unsigned offset = 0;
        bool need_balance_fs = false;
        int err = 0;
        struct f2fs_io_info fio = {
                .sbi = sbi,
                .type = DATA,
                .rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
                .page = page,
                .encrypted_page = NULL,
        };

        trace_f2fs_writepage(page, DATA);

        if (page->index < end_index)
                goto write;

        /*
         * If the offset is out-of-range of file size,
         * this page does not have to be written to disk.
         */
        offset = i_size & (PAGE_CACHE_SIZE - 1);
        if ((page->index >= end_index + 1) || !offset)
                goto out;

        zero_user_segment(page, offset, PAGE_CACHE_SIZE);
write:
        if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
                goto redirty_out;
        if (f2fs_is_drop_cache(inode))
                goto out;
        if (f2fs_is_volatile_file(inode) && !wbc->for_reclaim &&
                        available_free_memory(sbi, BASE_CHECK))
                goto redirty_out;

        /* Dentry blocks are controlled by checkpoint */
        if (S_ISDIR(inode->i_mode)) {
                if (unlikely(f2fs_cp_error(sbi)))
                        goto redirty_out;
                err = do_write_data_page(&fio);
                goto done;
        }

        /* we should bypass data pages to proceed the kworker jobs */
        if (unlikely(f2fs_cp_error(sbi))) {
                SetPageError(page);
                goto out;
        }

        if (!wbc->for_reclaim)
                need_balance_fs = true;
        else if (has_not_enough_free_secs(sbi, 0))
                goto redirty_out;

        err = -EAGAIN;
        f2fs_lock_op(sbi);
        if (f2fs_has_inline_data(inode))
                err = f2fs_write_inline_data(inode, page);
        if (err == -EAGAIN)
                err = do_write_data_page(&fio);
        f2fs_unlock_op(sbi);
done:
        if (err && err != -ENOENT)
                goto redirty_out;

        clear_cold_data(page);
out:
        inode_dec_dirty_pages(inode);
        if (err)
                ClearPageUptodate(page);
        unlock_page(page);
        if (need_balance_fs)
                f2fs_balance_fs(sbi);
        if (wbc->for_reclaim)
                f2fs_submit_merged_bio(sbi, DATA, WRITE);
        return 0;

redirty_out:
        redirty_page_for_writepage(wbc, page);
        return AOP_WRITEPAGE_ACTIVATE;
}
static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
                        void *data)
{
        struct address_space *mapping = data;
        int ret = mapping->a_ops->writepage(page, wbc);

        mapping_set_error(mapping, ret);
        return ret;
}
/*
 * This function was copied from write_cache_pages in mm/page-writeback.c.
 * The major change is that it writes cold data pages in a separate step
 * from warm/hot data pages.
 */
static int f2fs_write_cache_pages(struct address_space *mapping,
                        struct writeback_control *wbc, writepage_t writepage,
                        void *data)
{
        int ret = 0;
        int done = 0;
        struct pagevec pvec;
        int nr_pages;
        pgoff_t uninitialized_var(writeback_index);
        pgoff_t index;
        pgoff_t end;            /* Inclusive */
        pgoff_t done_index;
        int cycled;
        int range_whole = 0;
        int tag;
        int step = 0;

        pagevec_init(&pvec, 0);
next:
        if (wbc->range_cyclic) {
                writeback_index = mapping->writeback_index; /* prev offset */
                index = writeback_index;
                if (index == 0)
                        cycled = 1;
                else
                        cycled = 0;
                end = -1;
        } else {
                index = wbc->range_start >> PAGE_CACHE_SHIFT;
                end = wbc->range_end >> PAGE_CACHE_SHIFT;
                if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
                        range_whole = 1;
                cycled = 1; /* ignore range_cyclic tests */
        }
        if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
                tag = PAGECACHE_TAG_TOWRITE;
        else
                tag = PAGECACHE_TAG_DIRTY;
retry:
        if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
                tag_pages_for_writeback(mapping, index, end);
        done_index = index;
        while (!done && (index <= end)) {
                int i;

                nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
                        min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1);
                if (nr_pages == 0)
                        break;

                for (i = 0; i < nr_pages; i++) {
                        struct page *page = pvec.pages[i];

                        if (page->index > end) {
                                done = 1;
                                break;
                        }

                        done_index = page->index;

                        lock_page(page);

                        if (unlikely(page->mapping != mapping)) {
continue_unlock:
                                unlock_page(page);
                                continue;
                        }

                        if (!PageDirty(page)) {
                                /* someone wrote it for us */
                                goto continue_unlock;
                        }

                        if (step == 0 && !is_cold_data(page))
                                goto continue_unlock;
                        if (step == 1 && is_cold_data(page))
                                goto continue_unlock;

                        if (PageWriteback(page)) {
                                if (wbc->sync_mode != WB_SYNC_NONE)
                                        f2fs_wait_on_page_writeback(page, DATA);
                                else
                                        goto continue_unlock;
                        }

                        BUG_ON(PageWriteback(page));
                        if (!clear_page_dirty_for_io(page))
                                goto continue_unlock;

                        ret = (*writepage)(page, wbc, data);
                        if (unlikely(ret)) {
                                if (ret == AOP_WRITEPAGE_ACTIVATE) {
                                        unlock_page(page);
                                        ret = 0;
                                } else {
                                        done_index = page->index + 1;
                                        done = 1;
                                        break;
                                }
                        }

                        if (--wbc->nr_to_write <= 0 &&
                            wbc->sync_mode == WB_SYNC_NONE) {
                                done = 1;
                                break;
                        }
                }
                pagevec_release(&pvec);
                cond_resched();
        }

        if (step < 1) {
                step++;
                goto next;
        }

        if (!cycled && !done) {
                cycled = 1;
                index = 0;
                end = writeback_index - 1;
                goto retry;
        }
        if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
                mapping->writeback_index = done_index;

        return ret;
}
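/*
 * Per the step checks above, the dirty range is walked twice: step 0
 * writes only pages marked cold (is_cold_data()), step 1 writes the
 * remaining warm/hot pages, keeping the two temperature classes out of
 * each other's merged bios.
 */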
static int f2fs_write_data_pages(struct address_space *mapping,
                            struct writeback_control *wbc)
{
        struct inode *inode = mapping->host;
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        bool locked = false;
        int ret;
        long diff;

        trace_f2fs_writepages(mapping->host, wbc, DATA);

        /* deal with chardevs and other special file */
        if (!mapping->a_ops->writepage)
                return 0;

        if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
                        get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
                        available_free_memory(sbi, DIRTY_DENTS))
                goto skip_write;

        /* during POR, we don't need to trigger writepage at all. */
        if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
                goto skip_write;

        diff = nr_pages_to_write(sbi, DATA, wbc);

        if (!S_ISDIR(inode->i_mode)) {
                mutex_lock(&sbi->writepages);
                locked = true;
        }
        ret = f2fs_write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
        f2fs_submit_merged_bio(sbi, DATA, WRITE);
        if (locked)
                mutex_unlock(&sbi->writepages);

        remove_dirty_dir_inode(inode);

        wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
        return ret;

skip_write:
        wbc->pages_skipped += get_dirty_pages(inode);
        return 0;
}
static void f2fs_write_failed(struct address_space *mapping, loff_t to)
{
        struct inode *inode = mapping->host;

        if (to > inode->i_size) {
                truncate_pagecache(inode, inode->i_size);
                truncate_blocks(inode, inode->i_size, true);
        }
}
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
                loff_t pos, unsigned len, unsigned flags,
                struct page **pagep, void **fsdata)
{
        struct inode *inode = mapping->host;
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct page *page = NULL;
        struct page *ipage;
        pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
        struct dnode_of_data dn;
        int err = 0;

        trace_f2fs_write_begin(inode, pos, len, flags);

        f2fs_balance_fs(sbi);

        /*
         * We should check this at this moment to avoid deadlock on inode page
         * and #0 page. The locking rule for inline_data conversion should be:
         * lock_page(page #0) -> lock_page(inode_page)
         */
        if (index != 0) {
                err = f2fs_convert_inline_inode(inode);
                if (err)
                        goto fail;
        }
repeat:
        page = grab_cache_page_write_begin(mapping, index, flags);
        if (!page) {
                err = -ENOMEM;
                goto fail;
        }

        *pagep = page;

        f2fs_lock_op(sbi);

        /* check inline_data */
        ipage = get_node_page(sbi, inode->i_ino);
        if (IS_ERR(ipage)) {
                err = PTR_ERR(ipage);
                goto unlock_fail;
        }

        set_new_dnode(&dn, inode, ipage, ipage, 0);

        if (f2fs_has_inline_data(inode)) {
                if (pos + len <= MAX_INLINE_DATA) {
                        read_inline_data(page, ipage);
                        set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
                        sync_inode_page(&dn);
                        goto put_next;
                }
                err = f2fs_convert_inline_page(&dn, page);
                if (err)
                        goto put_fail;
        }
        err = f2fs_reserve_block(&dn, index);
        if (err)
                goto put_fail;
put_next:
        f2fs_put_dnode(&dn);
        f2fs_unlock_op(sbi);

        if (len == PAGE_CACHE_SIZE)
                goto out_update;
        if (PageUptodate(page))
                goto out_clear;

        f2fs_wait_on_page_writeback(page, DATA);

        if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
                unsigned start = pos & (PAGE_CACHE_SIZE - 1);
                unsigned end = start + len;

                /* Reading beyond i_size is simple: memset to zero */
                zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
                goto out_update;
        }

        if (dn.data_blkaddr == NEW_ADDR) {
                zero_user_segment(page, 0, PAGE_CACHE_SIZE);
        } else {
                struct f2fs_io_info fio = {
                        .sbi = sbi,
                        .type = DATA,
                        .rw = READ_SYNC,
                        .blk_addr = dn.data_blkaddr,
                        .page = page,
                        .encrypted_page = NULL,
                };
                err = f2fs_submit_page_bio(&fio);
                if (err)
                        goto fail;

                lock_page(page);
                if (unlikely(!PageUptodate(page))) {
                        err = -EIO;
                        goto fail;
                }
                if (unlikely(page->mapping != mapping)) {
                        f2fs_put_page(page, 1);
                        goto repeat;
                }

                /* avoid symlink page */
                if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
                        err = f2fs_decrypt_one(inode, page);
                        if (err)
                                goto fail;
                }
        }
out_update:
        SetPageUptodate(page);
out_clear:
        clear_cold_data(page);
        return 0;

put_fail:
        f2fs_put_dnode(&dn);
unlock_fail:
        f2fs_unlock_op(sbi);
fail:
        f2fs_put_page(page, 1);
        f2fs_write_failed(mapping, pos + len);
        return err;
}
static int f2fs_write_end(struct file *file,
                        struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned copied,
                        struct page *page, void *fsdata)
{
        struct inode *inode = page->mapping->host;

        trace_f2fs_write_end(inode, pos, len, copied);

        set_page_dirty(page);

        if (pos + copied > i_size_read(inode)) {
                i_size_write(inode, pos + copied);
                mark_inode_dirty(inode);
                update_inode_page(inode);
        }

        f2fs_put_page(page, 1);
        return copied;
}
static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
                        loff_t offset)
{
        unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;

        if (iov_iter_rw(iter) == READ)
                return 0;

        if (offset & blocksize_mask)
                return -EINVAL;

        if (iov_iter_alignment(iter) & blocksize_mask)
                return -EINVAL;

        return 0;
}
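/*
 * Example: with s_blocksize == 4096 the mask is 0xfff, so a direct write at
 * offset 8192 with 4KB-aligned iovecs passes, while offset 8200
 * (8200 & 0xfff == 8) gets -EINVAL, which makes f2fs_direct_IO() below
 * return 0 and fall back to buffered IO.
 */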
static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
                        loff_t offset)
{
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        size_t count = iov_iter_count(iter);
        int err;

        /* we don't need to use inline_data strictly */
        if (f2fs_has_inline_data(inode)) {
                err = f2fs_convert_inline_inode(inode);
                if (err)
                        return err;
        }

        if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
                return 0;

        if (check_direct_IO(inode, iter, offset))
                return 0;

        trace_f2fs_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));

        if (iov_iter_rw(iter) == WRITE)
                __allocate_data_blocks(inode, offset, count);

        err = blockdev_direct_IO(iocb, inode, iter, offset, get_data_block);
        if (err < 0 && iov_iter_rw(iter) == WRITE)
                f2fs_write_failed(mapping, offset + count);

        trace_f2fs_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), err);

        return err;
}
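/*
 * Write path summary: blocks are preallocated up front by
 * __allocate_data_blocks(), so get_data_block() calls issued from
 * blockdev_direct_IO() mostly find addresses already reserved; if the
 * write then fails, f2fs_write_failed() trims the over-extended tail.
 */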
void f2fs_invalidate_page(struct page *page, unsigned int offset,
                        unsigned int length)
{
        struct inode *inode = page->mapping->host;
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

        if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
                (offset % PAGE_CACHE_SIZE || length != PAGE_CACHE_SIZE))
                return;

        if (PageDirty(page)) {
                if (inode->i_ino == F2FS_META_INO(sbi))
                        dec_page_count(sbi, F2FS_DIRTY_META);
                else if (inode->i_ino == F2FS_NODE_INO(sbi))
                        dec_page_count(sbi, F2FS_DIRTY_NODES);
                else
                        inode_dec_dirty_pages(inode);
        }
        ClearPagePrivate(page);
}
int f2fs_release_page(struct page *page, gfp_t wait)
{
        /* If this is dirty page, keep PagePrivate */
        if (PageDirty(page))
                return 0;

        ClearPagePrivate(page);
        return 1;
}
static int f2fs_set_data_page_dirty(struct page *page)
{
        struct address_space *mapping = page->mapping;
        struct inode *inode = mapping->host;

        trace_f2fs_set_page_dirty(page, DATA);

        SetPageUptodate(page);

        if (f2fs_is_atomic_file(inode)) {
                register_inmem_page(inode, page);
                return 1;
        }

        if (!PageDirty(page)) {
                __set_page_dirty_nobuffers(page);
                update_dirty_page(inode, page);
                return 1;
        }
        return 0;
}
static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
        struct inode *inode = mapping->host;

        /* we don't need to use inline_data strictly */
        if (f2fs_has_inline_data(inode)) {
                int err = f2fs_convert_inline_inode(inode);
                if (err)
                        return err;
        }
        return generic_block_bmap(mapping, block, get_data_block);
}
const struct address_space_operations f2fs_dblock_aops = {
        .readpage       = f2fs_read_data_page,
        .readpages      = f2fs_read_data_pages,
        .writepage      = f2fs_write_data_page,
        .writepages     = f2fs_write_data_pages,
        .write_begin    = f2fs_write_begin,
        .write_end      = f2fs_write_end,
        .set_page_dirty = f2fs_set_data_page_dirty,
        .invalidatepage = f2fs_invalidate_page,
        .releasepage    = f2fs_release_page,
        .direct_IO      = f2fs_direct_IO,
};