1 dax: pass detailed error code from dax_iomap_fault()
3 From: Jan Kara <jack@suse.cz>
5 Ext4 needs to pass through error from its iomap handler to the page
6 fault handler so that it can properly detect ENOSPC and force
7 transaction commit and retry the fault (and block allocation). Add
8 argument to dax_iomap_fault() for passing such error.
10 Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com>
11 Signed-off-by: Jan Kara <jack@suse.cz>
12 Signed-off-by: Theodore Ts'o <tytso@mit.edu>
14 fs/dax.c | 9 ++++++---
15 fs/ext2/file.c | 2 +-
16 fs/ext4/file.c | 2 +-
17 fs/xfs/xfs_file.c | 2 +-
18 include/linux/dax.h | 2 +-
19 5 files changed, 10 insertions(+), 7 deletions(-)
21 diff --git a/fs/dax.c b/fs/dax.c
22 index 95981591977a..f3afa1d6156c 100644
25 @@ -1096,7 +1096,7 @@ static bool dax_fault_is_synchronous(unsigned long flags,
28 static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
29 - const struct iomap_ops *ops)
30 + int *iomap_errp, const struct iomap_ops *ops)
32 struct vm_area_struct *vma = vmf->vma;
33 struct address_space *mapping = vma->vm_file->f_mapping;
34 @@ -1149,6 +1149,8 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
35 * that we never have to deal with more than a single extent here.
37 error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
38 + if (iomap_errp)
39 + *iomap_errp = error;
41 vmf_ret = dax_fault_return(error);
43 @@ -1488,6 +1490,7 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
44 * @vmf: The description of the fault
45 * @pe_size: Size of the page to fault in
46 * @pfnp: PFN to insert for synchronous faults if fsync is required
47 + * @iomap_errp: Storage for detailed error code in case of error
48 * @ops: Iomap ops passed from the file system
50 * When a page fault occurs, filesystems may call this helper in
51 @@ -1496,11 +1499,11 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
54 int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
55 - pfn_t *pfnp, const struct iomap_ops *ops)
56 + pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops)
60 - return dax_iomap_pte_fault(vmf, pfnp, ops);
61 + return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops);
63 return dax_iomap_pmd_fault(vmf, pfnp, ops);
65 diff --git a/fs/ext2/file.c b/fs/ext2/file.c
66 index 2da67699dc33..09640220fda8 100644
69 @@ -100,7 +100,7 @@ static int ext2_dax_fault(struct vm_fault *vmf)
71 down_read(&ei->dax_sem);
73 - ret = dax_iomap_fault(vmf, PE_SIZE_PTE, NULL, &ext2_iomap_ops);
74 + ret = dax_iomap_fault(vmf, PE_SIZE_PTE, NULL, NULL, &ext2_iomap_ops);
76 up_read(&ei->dax_sem);
77 if (vmf->flags & FAULT_FLAG_WRITE)
78 diff --git a/fs/ext4/file.c b/fs/ext4/file.c
79 index a0ae27b1bc66..1c7cd882d998 100644
82 @@ -314,7 +314,7 @@ static int ext4_dax_huge_fault(struct vm_fault *vmf,
84 down_read(&EXT4_I(inode)->i_mmap_sem);
86 - result = dax_iomap_fault(vmf, pe_size, &pfn, &ext4_iomap_ops);
87 + result = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &ext4_iomap_ops);
89 ext4_journal_stop(handle);
90 /* Handling synchronous page fault? */
91 diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
92 index 8601275cc5e6..9ea08326f876 100644
93 --- a/fs/xfs/xfs_file.c
94 +++ b/fs/xfs/xfs_file.c
95 @@ -1048,7 +1048,7 @@ __xfs_filemap_fault(
99 - ret = dax_iomap_fault(vmf, pe_size, &pfn, &xfs_iomap_ops);
100 + ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &xfs_iomap_ops);
101 if (ret & VM_FAULT_NEEDDSYNC)
102 ret = dax_finish_sync_fault(vmf, pe_size, pfn);
104 diff --git a/include/linux/dax.h b/include/linux/dax.h
105 index 5258346c558c..0185ecdae135 100644
106 --- a/include/linux/dax.h
107 +++ b/include/linux/dax.h
108 @@ -96,7 +96,7 @@ bool dax_write_cache_enabled(struct dax_device *dax_dev);
109 ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
110 const struct iomap_ops *ops);
111 int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
112 - pfn_t *pfnp, const struct iomap_ops *ops);
113 + pfn_t *pfnp, int *errp, const struct iomap_ops *ops);
114 int dax_finish_sync_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
116 int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);