Commit 429f8de7 authored by Christoph Hellwig's avatar Christoph Hellwig Committed by Dan Williams

fsdax: use a saner calling convention for copy_cow_page_dax

Just pass the vm_fault and iomap_iter structures, and figure out the rest
locally.  Note that this requires moving dax_iomap_sector up in the file.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Link: https://lore.kernel.org/r/20211129102203.2243509-14-hch@lst.de
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent 9dc2f9cd
...@@ -709,26 +709,31 @@ int dax_invalidate_mapping_entry_sync(struct address_space *mapping, ...@@ -709,26 +709,31 @@ int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
return __dax_invalidate_entry(mapping, index, false); return __dax_invalidate_entry(mapping, index, false);
} }
static int copy_cow_page_dax(struct block_device *bdev, struct dax_device *dax_dev, static sector_t dax_iomap_sector(const struct iomap *iomap, loff_t pos)
sector_t sector, struct page *to, unsigned long vaddr)
{ {
return (iomap->addr + (pos & PAGE_MASK) - iomap->offset) >> 9;
}
static int copy_cow_page_dax(struct vm_fault *vmf, const struct iomap_iter *iter)
{
sector_t sector = dax_iomap_sector(&iter->iomap, iter->pos);
void *vto, *kaddr; void *vto, *kaddr;
pgoff_t pgoff; pgoff_t pgoff;
long rc; long rc;
int id; int id;
rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff); rc = bdev_dax_pgoff(iter->iomap.bdev, sector, PAGE_SIZE, &pgoff);
if (rc) if (rc)
return rc; return rc;
id = dax_read_lock(); id = dax_read_lock();
rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr, NULL); rc = dax_direct_access(iter->iomap.dax_dev, pgoff, 1, &kaddr, NULL);
if (rc < 0) { if (rc < 0) {
dax_read_unlock(id); dax_read_unlock(id);
return rc; return rc;
} }
vto = kmap_atomic(to); vto = kmap_atomic(vmf->cow_page);
copy_user_page(vto, kaddr, vaddr, to); copy_user_page(vto, kaddr, vmf->address, vmf->cow_page);
kunmap_atomic(vto); kunmap_atomic(vto);
dax_read_unlock(id); dax_read_unlock(id);
return 0; return 0;
...@@ -1005,11 +1010,6 @@ int dax_writeback_mapping_range(struct address_space *mapping, ...@@ -1005,11 +1010,6 @@ int dax_writeback_mapping_range(struct address_space *mapping,
} }
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range); EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
static sector_t dax_iomap_sector(const struct iomap *iomap, loff_t pos)
{
return (iomap->addr + (pos & PAGE_MASK) - iomap->offset) >> 9;
}
static int dax_iomap_pfn(const struct iomap *iomap, loff_t pos, size_t size, static int dax_iomap_pfn(const struct iomap *iomap, loff_t pos, size_t size,
pfn_t *pfnp) pfn_t *pfnp)
{ {
...@@ -1332,19 +1332,16 @@ static vm_fault_t dax_fault_synchronous_pfnp(pfn_t *pfnp, pfn_t pfn) ...@@ -1332,19 +1332,16 @@ static vm_fault_t dax_fault_synchronous_pfnp(pfn_t *pfnp, pfn_t pfn)
static vm_fault_t dax_fault_cow_page(struct vm_fault *vmf, static vm_fault_t dax_fault_cow_page(struct vm_fault *vmf,
const struct iomap_iter *iter) const struct iomap_iter *iter)
{ {
sector_t sector = dax_iomap_sector(&iter->iomap, iter->pos);
unsigned long vaddr = vmf->address;
vm_fault_t ret; vm_fault_t ret;
int error = 0; int error = 0;
switch (iter->iomap.type) { switch (iter->iomap.type) {
case IOMAP_HOLE: case IOMAP_HOLE:
case IOMAP_UNWRITTEN: case IOMAP_UNWRITTEN:
clear_user_highpage(vmf->cow_page, vaddr); clear_user_highpage(vmf->cow_page, vmf->address);
break; break;
case IOMAP_MAPPED: case IOMAP_MAPPED:
error = copy_cow_page_dax(iter->iomap.bdev, iter->iomap.dax_dev, error = copy_cow_page_dax(vmf, iter);
sector, vmf->cow_page, vaddr);
break; break;
default: default:
WARN_ON_ONCE(1); WARN_ON_ONCE(1);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment