Commit c0b24625 authored by Jan Kara, committed by Theodore Ts'o

dax: pass detailed error code from dax_iomap_fault()

Ext4 needs to pass the error from its iomap handler through to the page
fault handler so that it can properly detect ENOSPC, force a transaction
commit, and retry the fault (and block allocation). Add an argument to
dax_iomap_fault() for passing such an error.
Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
parent bbe45d24
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1096,7 +1096,7 @@ static bool dax_fault_is_synchronous(unsigned long flags,
 }
 
 static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
-                               const struct iomap_ops *ops)
+                               int *iomap_errp, const struct iomap_ops *ops)
 {
         struct vm_area_struct *vma = vmf->vma;
         struct address_space *mapping = vma->vm_file->f_mapping;
@@ -1149,6 +1149,8 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
          * that we never have to deal with more than a single extent here.
          */
         error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
+        if (iomap_errp)
+                *iomap_errp = error;
         if (error) {
                 vmf_ret = dax_fault_return(error);
                 goto unlock_entry;
@@ -1488,6 +1490,7 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
  * @vmf: The description of the fault
  * @pe_size: Size of the page to fault in
  * @pfnp: PFN to insert for synchronous faults if fsync is required
+ * @iomap_errp: Storage for detailed error code in case of error
  * @ops: Iomap ops passed from the file system
  *
  * When a page fault occurs, filesystems may call this helper in
@@ -1496,11 +1499,11 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
  * successfully.
  */
 int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
-                    pfn_t *pfnp, const struct iomap_ops *ops)
+                    pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops)
 {
         switch (pe_size) {
         case PE_SIZE_PTE:
-                return dax_iomap_pte_fault(vmf, pfnp, ops);
+                return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops);
         case PE_SIZE_PMD:
                 return dax_iomap_pmd_fault(vmf, pfnp, ops);
         default:
--- a/fs/ext2/file.c
+++ b/fs/ext2/file.c
@@ -100,7 +100,7 @@ static int ext2_dax_fault(struct vm_fault *vmf)
         }
         down_read(&ei->dax_sem);
 
-        ret = dax_iomap_fault(vmf, PE_SIZE_PTE, NULL, &ext2_iomap_ops);
+        ret = dax_iomap_fault(vmf, PE_SIZE_PTE, NULL, NULL, &ext2_iomap_ops);
 
         up_read(&ei->dax_sem);
         if (vmf->flags & FAULT_FLAG_WRITE)
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -314,7 +314,7 @@ static int ext4_dax_huge_fault(struct vm_fault *vmf,
         } else {
                 down_read(&EXT4_I(inode)->i_mmap_sem);
         }
-        result = dax_iomap_fault(vmf, pe_size, &pfn, &ext4_iomap_ops);
+        result = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &ext4_iomap_ops);
         if (write) {
                 ext4_journal_stop(handle);
                 /* Handling synchronous page fault? */
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -1048,7 +1048,7 @@ __xfs_filemap_fault(
         if (IS_DAX(inode)) {
                 pfn_t pfn;
 
-                ret = dax_iomap_fault(vmf, pe_size, &pfn, &xfs_iomap_ops);
+                ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &xfs_iomap_ops);
                 if (ret & VM_FAULT_NEEDDSYNC)
                         ret = dax_finish_sync_fault(vmf, pe_size, pfn);
         } else {
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -96,7 +96,7 @@ bool dax_write_cache_enabled(struct dax_device *dax_dev);
 ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
                 const struct iomap_ops *ops);
 int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
-                    pfn_t *pfnp, const struct iomap_ops *ops);
+                    pfn_t *pfnp, int *errp, const struct iomap_ops *ops);
 int dax_finish_sync_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
                 pfn_t pfn);
 int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
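For context, here is a minimal sketch (not part of this commit) of how a
filesystem fault handler might consume the new out-parameter; fs_dax_fault(),
fs_iomap_ops, and fs_should_retry_alloc() are hypothetical names, and ext4's
actual ENOSPC retry logic lands in a follow-up change:

/*
 * Hypothetical sketch: retry a DAX fault when the iomap handler
 * reports ENOSPC. The VM_FAULT_* return value alone cannot
 * distinguish ENOSPC from other failures, which is why the caller
 * needs the raw errno delivered through the new argument.
 */
static int fs_dax_fault(struct vm_fault *vmf, enum page_entry_size pe_size)
{
        pfn_t pfn;
        int error, result;

retry:
        /*
         * dax_iomap_fault() returns VM_FAULT_* bits; the errno from
         * ->iomap_begin() is reported through &error.
         */
        result = dax_iomap_fault(vmf, pe_size, &pfn, &error, &fs_iomap_ops);
        if ((result & VM_FAULT_ERROR) && error == -ENOSPC &&
            fs_should_retry_alloc(vmf->vma->vm_file->f_mapping->host))
                goto retry;     /* commit the journal to free space, retry */

        return result;
}

Callers that do not care about the detailed error, like ext2 and XFS above,
simply pass NULL and keep their previous behaviour.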