Commit 00cc790e authored by Shiyang Ruan's avatar Shiyang Ruan Committed by akpm

mm: factor helpers for memory_failure_dev_pagemap

The memory_failure_dev_pagemap code is a bit complex before the RMAP
feature for fsdax is introduced, so factor out some helper functions to
simplify this code.

[akpm@linux-foundation.org: fix CONFIG_HUGETLB_PAGE=n build]
[zhengbin13@huawei.com: fix redefinition of mf_generic_kill_procs]
  Link: https://lkml.kernel.org/r/20220628112143.1170473-1-zhengbin13@huawei.com
Link: https://lkml.kernel.org/r/20220603053738.1218681-3-ruansy.fnst@fujitsu.com
Signed-off-by: default avatarShiyang Ruan <ruansy.fnst@fujitsu.com>
Signed-off-by: default avatarZheng Bin <zhengbin13@huawei.com>
Reviewed-by: default avatarDarrick J. Wong <djwong@kernel.org>
Reviewed-by: default avatarChristoph Hellwig <hch@lst.de>
Reviewed-by: default avatarDan Williams <dan.j.williams@intel.com>
Reviewed-by: default avatarMiaohe Lin <linmiaohe@huawei.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Goldwyn Rodrigues <rgoldwyn@suse.com>
Cc: Goldwyn Rodrigues <rgoldwyn@suse.de>
Cc: Jane Chu <jane.chu@oracle.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Naoya Horiguchi <naoya.horiguchi@nec.com>
Cc: Ritesh Harjani <riteshh@linux.ibm.com>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
parent 8012b866
...@@ -1499,6 +1499,95 @@ static int try_to_split_thp_page(struct page *page, const char *msg) ...@@ -1499,6 +1499,95 @@ static int try_to_split_thp_page(struct page *page, const char *msg)
return 0; return 0;
} }
/*
 * unmap_and_kill - tear down user mappings of a poisoned page and deliver
 * SIGBUS to every process on @to_kill.
 * @to_kill:	list of struct to_kill entries built by collect_procs()
 * @pfn:	pfn of the poisoned page (reported in siginfo)
 * @mapping:	address_space the page belongs to
 * @index:	page offset of the poisoned page within @mapping
 * @flags:	MF_* action flags from the caller
 *
 * Consumes @to_kill: kill_procs() frees each entry as it signals the task.
 */
static void unmap_and_kill(struct list_head *to_kill, unsigned long pfn,
		struct address_space *mapping, pgoff_t index, int flags)
{
	struct to_kill *tk;
	unsigned long size = 0;

	/* Find the largest mapping granularity any victim task used. */
	list_for_each_entry(tk, to_kill, nd)
		if (tk->size_shift)
			size = max(size, 1UL << tk->size_shift);

	if (size) {
		/*
		 * Unmap the largest mapping to avoid breaking up device-dax
		 * mappings which are constant size. The actual size of the
		 * mapping being torn down is communicated in siginfo, see
		 * kill_proc()
		 *
		 * Widen @index to loff_t before shifting: pgoff_t is only
		 * 32 bits on 32-bit kernels, so shifting it by PAGE_SHIFT
		 * first would truncate large file offsets.
		 */
		loff_t start = ((loff_t)index << PAGE_SHIFT) & ~(size - 1);

		unmap_mapping_range(mapping, start, size, 0);
	}

	kill_procs(to_kill, flags & MF_MUST_KILL, false, pfn, flags);
}
/*
 * mf_generic_kill_procs - generic memory-failure handling for a dax
 * (ZONE_DEVICE) page: mark the page hwpoisoned, unmap it and SIGBUS
 * every process that maps it.
 * @pfn:	pfn of the poisoned page
 * @flags:	MF_* action flags; MF_ACTION_REQUIRED | MF_MUST_KILL are
 *		forced on below, since ZONE_DEVICE pages cannot be replaced
 * @pgmap:	dev_pagemap covering @pfn
 *
 * Returns 0 on success, -EBUSY if the dax entry lock cannot be taken,
 * -EOPNOTSUPP if the page is filtered out by hwpoison_filter(), or
 * -ENXIO for device-private/device-coherent pages (not handled here).
 */
static int mf_generic_kill_procs(unsigned long long pfn, int flags,
struct dev_pagemap *pgmap)
{
struct page *page = pfn_to_page(pfn);
LIST_HEAD(to_kill);
dax_entry_t cookie;
int rc = 0;
/*
 * Pages instantiated by device-dax (not filesystem-dax)
 * may be compound pages.
 */
page = compound_head(page);
/*
 * Prevent the inode from being freed while we are interrogating
 * the address_space, typically this would be handled by
 * lock_page(), but dax pages do not use the page lock. This
 * also prevents changes to the mapping of this pfn until
 * poison signaling is complete.
 */
cookie = dax_lock_page(page);
if (!cookie)
return -EBUSY;
/* Honor the hwpoison test/injection filter, if configured. */
if (hwpoison_filter(page)) {
rc = -EOPNOTSUPP;
goto unlock;
}
switch (pgmap->type) {
case MEMORY_DEVICE_PRIVATE:
case MEMORY_DEVICE_COHERENT:
/*
 * TODO: Handle device pages which may need coordination
 * with device-side memory.
 */
rc = -ENXIO;
goto unlock;
default:
break;
}
/*
 * Use this flag as an indication that the dax page has been
 * remapped UC to prevent speculative consumption of poison.
 */
SetPageHWPoison(page);
/*
 * Unlike System-RAM there is no possibility to swap in a
 * different physical page at a given virtual address, so all
 * userspace consumption of ZONE_DEVICE memory necessitates
 * SIGBUS (i.e. MF_MUST_KILL)
 */
flags |= MF_ACTION_REQUIRED | MF_MUST_KILL;
/* Gather every task mapping the page, then unmap and signal them. */
collect_procs(page, &to_kill, true);
unmap_and_kill(&to_kill, pfn, page->mapping, page->index, flags);
unlock:
dax_unlock_page(page, cookie);
return rc;
}
/* /*
* Called from hugetlb code with hugetlb_lock held. * Called from hugetlb code with hugetlb_lock held.
* *
...@@ -1634,23 +1723,20 @@ static int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb ...@@ -1634,23 +1723,20 @@ static int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb
unlock_page(head); unlock_page(head);
return res; return res;
} }
#else #else
static inline int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb) static inline int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb)
{ {
return 0; return 0;
} }
#endif
#endif /* CONFIG_HUGETLB_PAGE */
static int memory_failure_dev_pagemap(unsigned long pfn, int flags, static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
struct dev_pagemap *pgmap) struct dev_pagemap *pgmap)
{ {
struct page *page = pfn_to_page(pfn); struct page *page = pfn_to_page(pfn);
unsigned long size = 0; int rc = -ENXIO;
struct to_kill *tk;
LIST_HEAD(tokill);
int rc = -EBUSY;
loff_t start;
dax_entry_t cookie;
if (flags & MF_COUNT_INCREASED) if (flags & MF_COUNT_INCREASED)
/* /*
...@@ -1659,77 +1745,10 @@ static int memory_failure_dev_pagemap(unsigned long pfn, int flags, ...@@ -1659,77 +1745,10 @@ static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
put_page(page); put_page(page);
/* device metadata space is not recoverable */ /* device metadata space is not recoverable */
if (!pgmap_pfn_valid(pgmap, pfn)) { if (!pgmap_pfn_valid(pgmap, pfn))
rc = -ENXIO;
goto out; goto out;
}
/*
* Pages instantiated by device-dax (not filesystem-dax)
* may be compound pages.
*/
page = compound_head(page);
/*
* Prevent the inode from being freed while we are interrogating
* the address_space, typically this would be handled by
* lock_page(), but dax pages do not use the page lock. This
* also prevents changes to the mapping of this pfn until
* poison signaling is complete.
*/
cookie = dax_lock_page(page);
if (!cookie)
goto out;
if (hwpoison_filter(page)) {
rc = -EOPNOTSUPP;
goto unlock;
}
switch (pgmap->type) {
case MEMORY_DEVICE_PRIVATE:
case MEMORY_DEVICE_COHERENT:
/*
* TODO: Handle device pages which may need coordination
* with device-side memory.
*/
goto unlock;
default:
break;
}
/* rc = mf_generic_kill_procs(pfn, flags, pgmap);
* Use this flag as an indication that the dax page has been
* remapped UC to prevent speculative consumption of poison.
*/
SetPageHWPoison(page);
/*
* Unlike System-RAM there is no possibility to swap in a
* different physical page at a given virtual address, so all
* userspace consumption of ZONE_DEVICE memory necessitates
* SIGBUS (i.e. MF_MUST_KILL)
*/
flags |= MF_ACTION_REQUIRED | MF_MUST_KILL;
collect_procs(page, &tokill, true);
list_for_each_entry(tk, &tokill, nd)
if (tk->size_shift)
size = max(size, 1UL << tk->size_shift);
if (size) {
/*
* Unmap the largest mapping to avoid breaking up
* device-dax mappings which are constant size. The
* actual size of the mapping being torn down is
* communicated in siginfo, see kill_proc()
*/
start = (page->index << PAGE_SHIFT) & ~(size - 1);
unmap_mapping_range(page->mapping, start, size, 0);
}
kill_procs(&tokill, true, false, pfn, flags);
rc = 0;
unlock:
dax_unlock_page(page, cookie);
out: out:
/* drop pgmap ref acquired in caller */ /* drop pgmap ref acquired in caller */
put_dev_pagemap(pgmap); put_dev_pagemap(pgmap);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment