Commit c6f0b395 authored by Shiyang Ruan, committed by Andrew Morton

fsdax,xfs: set the shared flag when file extent is shared

If a dax page is shared, mmap reads at different file offsets can also
trigger page faults on the same dax page.  So, rename the flag from "cow"
to "shared", and obtain the shared flag from the filesystem on reads as
well.

Link: https://lkml.kernel.org/r/1669908538-55-5-git-send-email-ruansy.fnst@fujitsu.com
Signed-off-by: Shiyang Ruan <ruansy.fnst@fujitsu.com>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 708dfad2
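
For context before the diff, here is a minimal user-space C sketch of the
behavioural change in dax_insert_entry().  This is not kernel code: the flag
bit values and the simplified predicate below are illustrative assumptions.
The old dax_fault_is_cow() predicate required IOMAP_WRITE, so a read fault
on a shared extent was never treated as shared; the new code tracks "shared"
independently and gates only the writeback tagging on the write flag.

/*
 * User-space sketch only -- flag bit values are illustrative, not the
 * kernel's.  It contrasts the old "cow" predicate (write-only) with
 * the new split into "shared" and "write".
 */
#include <stdbool.h>
#include <stdio.h>

#define IOMAP_WRITE	(1u << 0)	/* fault is a write */
#define IOMAP_F_SHARED	(1u << 1)	/* extent is shared (reflinked) */

/* old predicate: never true for a read fault, even on a shared extent */
static bool dax_fault_is_cow(unsigned iter_flags, unsigned iomap_flags)
{
	return (iter_flags & IOMAP_WRITE) && (iomap_flags & IOMAP_F_SHARED);
}

int main(void)
{
	unsigned iter_flags = 0;		/* read fault ... */
	unsigned iomap_flags = IOMAP_F_SHARED;	/* ... on a shared extent */

	/* new scheme: sharedness is independent of the fault direction */
	bool write = iter_flags & IOMAP_WRITE;
	bool shared = iomap_flags & IOMAP_F_SHARED;

	printf("old cow=%d\n", dax_fault_is_cow(iter_flags, iomap_flags));
	printf("new shared=%d towrite=%d\n", shared, write && shared);
	return 0;
}

Running it prints "old cow=0" but "new shared=1 towrite=0": the read fault
now sees the extent as shared, while the TOWRITE tag stays write-only.
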
@@ -846,12 +846,6 @@ static bool dax_fault_is_synchronous(const struct iomap_iter *iter,
 		(iter->iomap.flags & IOMAP_F_DIRTY);
 }
 
-static bool dax_fault_is_cow(const struct iomap_iter *iter)
-{
-	return (iter->flags & IOMAP_WRITE) &&
-		(iter->iomap.flags & IOMAP_F_SHARED);
-}
-
 /*
  * By this point grab_mapping_entry() has ensured that we have a locked entry
  * of the appropriate size so we don't have to worry about downgrading PMDs to
@@ -865,13 +859,14 @@ static void *dax_insert_entry(struct xa_state *xas, struct vm_fault *vmf,
 {
 	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
 	void *new_entry = dax_make_entry(pfn, flags);
-	bool dirty = !dax_fault_is_synchronous(iter, vmf->vma);
-	bool cow = dax_fault_is_cow(iter);
+	bool write = iter->flags & IOMAP_WRITE;
+	bool dirty = write && !dax_fault_is_synchronous(iter, vmf->vma);
+	bool shared = iter->iomap.flags & IOMAP_F_SHARED;
 
 	if (dirty)
 		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
 
-	if (cow || (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE))) {
+	if (shared || (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE))) {
 		unsigned long index = xas->xa_index;
 		/* we are replacing a zero page with block mapping */
 		if (dax_is_pmd_entry(entry))
@@ -883,12 +878,12 @@ static void *dax_insert_entry(struct xa_state *xas, struct vm_fault *vmf,
 	xas_reset(xas);
 	xas_lock_irq(xas);
-	if (cow || dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
+	if (shared || dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
 		void *old;
 
 		dax_disassociate_entry(entry, mapping, false);
 		dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address,
-				cow);
+				shared);
 		/*
 		 * Only swap our new entry into the page cache if the current
 		 * entry is a zero page or an empty entry. If a normal PTE or
@@ -908,7 +903,7 @@ static void *dax_insert_entry(struct xa_state *xas, struct vm_fault *vmf,
 	if (dirty)
 		xas_set_mark(xas, PAGECACHE_TAG_DIRTY);
 
-	if (cow)
+	if (write && shared)
 		xas_set_mark(xas, PAGECACHE_TAG_TOWRITE);
 
 	xas_unlock_irq(xas);
...
@@ -1215,7 +1215,7 @@ xfs_read_iomap_begin(
 		return error;
 	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
 			       &nimaps, 0);
-	if (!error && (flags & IOMAP_REPORT))
+	if (!error && ((flags & IOMAP_REPORT) || IS_DAX(inode)))
 		error = xfs_reflink_trim_around_shared(ip, &imap, &shared);
 	xfs_iunlock(ip, lockmode);
...
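
A similar user-space sketch of the xfs_read_iomap_begin() hunk above, under
the same caveats (the IOMAP_REPORT bit value is illustrative, and the two
helper functions are hypothetical stand-ins, not kernel APIs): previously
only IOMAP_REPORT callers such as FIEMAP asked xfs_reflink_trim_around_shared()
about sharing, so a DAX read fault never learned IOMAP_F_SHARED; now DAX
inodes check it too.

/*
 * User-space sketch -- IOMAP_REPORT's bit value is illustrative and the
 * helpers are hypothetical stand-ins for the guard in
 * xfs_read_iomap_begin() before and after this patch.
 */
#include <stdbool.h>
#include <stdio.h>

#define IOMAP_REPORT	(1u << 4)	/* illustrative bit value */

static bool checks_sharing_old(unsigned flags, bool is_dax)
{
	(void)is_dax;			/* old: being DAX did not matter */
	return flags & IOMAP_REPORT;
}

static bool checks_sharing_new(unsigned flags, bool is_dax)
{
	return (flags & IOMAP_REPORT) || is_dax;	/* new: DAX too */
}

int main(void)
{
	/* a read fault on a DAX inode: no IOMAP_REPORT, IS_DAX() true */
	printf("old=%d new=%d\n",
	       checks_sharing_old(0, true),
	       checks_sharing_new(0, true));
	return 0;
}

It prints "old=0 new=1": with the patch, the read iomap path reports the
shared extent, which is how IOMAP_F_SHARED reaches dax_insert_entry() on
read faults.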