1 ext4: pre-zero allocated blocks for DAX IO
3 From: Jan Kara <jack@suse.cz>
5 Currently ext4 treats DAX IO the same way as direct IO. I.e., it
6 allocates unwritten extents before IO is done and converts unwritten
7 extents afterwards. However this way DAX IO can race with page fault to
10 ext4_ext_direct_IO() dax_fault()
12 get_block() - allocates unwritten extent
14 get_block() - converts
18 ext4_convert_unwritten_extents()
20 So data written with DAX IO gets lost. Similarly dax_new_buf() called
21 from dax_io() can overwrite data that has already been written to the
24 Fix the problem by using pre-zeroed blocks for DAX IO the same way as we
25 use them for DAX mmap. The downside of this solution is that every
26 allocating write writes each block twice (once zeros, once data). Fixing
27 the race with locking is possible as well; however, we would need to
28 lock out faults for the whole range written to by DAX IO. And that is
29 not easy to do without locking out faults for the whole file, which seems
32 Signed-off-by: Jan Kara <jack@suse.cz>
33 Signed-off-by: Theodore Ts'o <tytso@mit.edu>
35 fs/ext4/ext4.h | 11 +++++++++--
36 fs/ext4/file.c | 4 ++--
37 fs/ext4/inode.c | 43 +++++++++++++++++++++++++++++++++----------
38 3 files changed, 44 insertions(+), 14 deletions(-)
40 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
41 index 35792b430fb6..516e3dd506c0 100644
44 @@ -2521,8 +2521,8 @@ struct buffer_head *ext4_getblk(handle_t *, struct inode *, ext4_lblk_t, int);
45 struct buffer_head *ext4_bread(handle_t *, struct inode *, ext4_lblk_t, int);
46 int ext4_get_block_unwritten(struct inode *inode, sector_t iblock,
47 struct buffer_head *bh_result, int create);
48 -int ext4_dax_mmap_get_block(struct inode *inode, sector_t iblock,
49 - struct buffer_head *bh_result, int create);
50 +int ext4_dax_get_block(struct inode *inode, sector_t iblock,
51 + struct buffer_head *bh_result, int create);
52 int ext4_get_block(struct inode *inode, sector_t iblock,
53 struct buffer_head *bh_result, int create);
54 int ext4_dio_get_block(struct inode *inode, sector_t iblock,
55 @@ -3328,6 +3328,13 @@ static inline void ext4_clear_io_unwritten_flag(ext4_io_end_t *io_end)
59 +static inline bool ext4_aligned_io(struct inode *inode, loff_t off, loff_t len)
61 + int blksize = 1 << inode->i_blkbits;
63 + return IS_ALIGNED(off, blksize) && IS_ALIGNED(len, blksize);
66 #endif /* __KERNEL__ */
68 #define EFSBADCRC EBADMSG /* Bad CRC detected */
69 diff --git a/fs/ext4/file.c b/fs/ext4/file.c
70 index fa2208bae2e1..dfb33da04589 100644
73 @@ -207,7 +207,7 @@ static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
75 result = VM_FAULT_SIGBUS;
77 - result = __dax_fault(vma, vmf, ext4_dax_mmap_get_block, NULL);
78 + result = __dax_fault(vma, vmf, ext4_dax_get_block, NULL);
82 @@ -243,7 +243,7 @@ static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
83 result = VM_FAULT_SIGBUS;
85 result = __dax_pmd_fault(vma, addr, pmd, flags,
86 - ext4_dax_mmap_get_block, NULL);
87 + ext4_dax_get_block, NULL);
91 diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
92 index 23fd0e0a9223..a2b7e4761c82 100644
95 @@ -3215,13 +3215,17 @@ static int ext4_releasepage(struct page *page, gfp_t wait)
99 -int ext4_dax_mmap_get_block(struct inode *inode, sector_t iblock,
100 - struct buffer_head *bh_result, int create)
102 + * Get block function for DAX IO and mmap faults. It takes care of converting
103 + * unwritten extents to written ones and initializes new / converted blocks
106 +int ext4_dax_get_block(struct inode *inode, sector_t iblock,
107 + struct buffer_head *bh_result, int create)
111 - ext4_debug("ext4_dax_mmap_get_block: inode %lu, create flag %d\n",
112 - inode->i_ino, create);
113 + ext4_debug("inode %lu, create flag %d\n", inode->i_ino, create);
115 return _ext4_get_block(inode, iblock, bh_result, 0);
117 @@ -3233,9 +3237,9 @@ int ext4_dax_mmap_get_block(struct inode *inode, sector_t iblock,
119 if (buffer_unwritten(bh_result)) {
121 - * We are protected by i_mmap_sem so we know block cannot go
122 - * away from under us even though we dropped i_data_sem.
123 - * Convert extent to written and write zeros there.
124 + * We are protected by i_mmap_sem or i_mutex so we know block
125 + * cannot go away from under us even though we dropped
126 + * i_data_sem. Convert extent to written and write zeros there.
128 ret = ext4_get_block_trans(inode, iblock, bh_result,
129 EXT4_GET_BLOCKS_CONVERT |
130 @@ -3250,6 +3254,14 @@ int ext4_dax_mmap_get_block(struct inode *inode, sector_t iblock,
131 clear_buffer_new(bh_result);
135 +/* Just define empty function, it will never get called. */
136 +int ext4_dax_get_block(struct inode *inode, sector_t iblock,
137 + struct buffer_head *bh_result, int create)
144 static int ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
145 @@ -3371,8 +3383,20 @@ static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter,
146 iocb->private = NULL;
148 get_block_func = ext4_dio_get_block_overwrite;
149 - else if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) ||
150 - round_down(offset, 1 << inode->i_blkbits) >= inode->i_size) {
151 + else if (IS_DAX(inode)) {
153 + * We can avoid zeroing for aligned DAX writes beyond EOF. Other
154 + * writes need zeroing either because they can race with page
155 + * faults or because they use partial blocks.
157 + if (round_down(offset, 1<<inode->i_blkbits) >= inode->i_size &&
158 + ext4_aligned_io(inode, offset, count))
159 + get_block_func = ext4_dio_get_block;
161 + get_block_func = ext4_dax_get_block;
162 + dio_flags = DIO_LOCKING;
163 + } else if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) ||
164 + round_down(offset, 1 << inode->i_blkbits) >= inode->i_size) {
165 get_block_func = ext4_dio_get_block;
166 dio_flags = DIO_LOCKING | DIO_SKIP_HOLES;
167 } else if (is_sync_kiocb(iocb)) {
168 @@ -3386,7 +3410,6 @@ static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter,
169 BUG_ON(ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode));
172 - dio_flags &= ~DIO_SKIP_HOLES;
173 ret = dax_do_io(iocb, inode, iter, offset, get_block_func,
174 ext4_end_io_dio, dio_flags);