Commit a873dfe1 authored by Tony Luck's avatar Tony Luck Committed by Andrew Morton

mm, hwpoison: try to recover from copy-on write faults

Patch series "Copy-on-write poison recovery", v3.

Part 1 deals with the process that triggered the copy on write fault with
a store to a shared read-only page.  That process is sent a SIGBUS with
the usual machine check decoration to specify the virtual address of the
lost page, together with the scope.

Part 2 sets up to asynchronously take the page with the uncorrected error
offline to prevent additional machine check faults.  H/t to Miaohe Lin
<linmiaohe@huawei.com> and Shuai Xue <xueshuai@linux.alibaba.com> for
pointing me to the existing function to queue a call to memory_failure().

On x86 there is some duplicate reporting (because the error is also
signalled by the memory controller as well as by the core that triggered
the machine check).  Console logs look like this:


This patch (of 2):

If the kernel is copying a page as the result of a copy-on-write
fault and runs into an uncorrectable error, Linux will crash because
it does not have recovery code for this case where poison is consumed
by the kernel.

It is easy to set up a test case. Just inject an error into a private
page, fork(2), and have the child process write to the page.

I wrapped that neatly into a test at:

  git://git.kernel.org/pub/scm/linux/kernel/git/aegl/ras-tools.git

just enable ACPI error injection and run:

  # ./einj_mem-uc -f copy-on-write

Add a new copy_user_highpage_mc() function that uses copy_mc_to_kernel()
on architectures where that is available (currently x86 and powerpc).
When an error is detected during the page copy, return VM_FAULT_HWPOISON
to caller of wp_page_copy(). This propagates up the call stack. Both x86
and powerpc have code in their fault handler to deal with this code by
sending a SIGBUS to the application.

Note that this patch avoids a system crash and signals the process that
triggered the copy-on-write action. It does not take any action for the
memory error that is still in the shared page. To handle that a call to
memory_failure() is needed. But this cannot be done from wp_page_copy()
because it holds mmap_lock(). Perhaps the architecture fault handlers
can deal with this loose end in a subsequent patch?

On Intel/x86 this loose end will often be handled automatically because
the memory controller provides an additional notification of the h/w
poison in memory, the handler for this will call memory_failure(). This
isn't a 100% solution. If there are multiple errors, not all may be
logged in this way.

[tony.luck@intel.com: add call to kmsan_unpoison_memory(), per Miaohe Lin]
  Link: https://lkml.kernel.org/r/20221031201029.102123-2-tony.luck@intel.com
Link: https://lkml.kernel.org/r/20221021200120.175753-1-tony.luck@intel.com
Link: https://lkml.kernel.org/r/20221021200120.175753-2-tony.luck@intel.com
Signed-off-by: Tony Luck <tony.luck@intel.com>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Reviewed-by: Naoya Horiguchi <naoya.horiguchi@nec.com>
Reviewed-by: Miaohe Lin <linmiaohe@huawei.com>
Reviewed-by: Alexander Potapenko <glider@google.com>
Tested-by: Shuai Xue <xueshuai@linux.alibaba.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent f689054a
...@@ -319,6 +319,32 @@ static inline void copy_user_highpage(struct page *to, struct page *from, ...@@ -319,6 +319,32 @@ static inline void copy_user_highpage(struct page *to, struct page *from,
#endif #endif
#ifdef copy_mc_to_kernel
/*
 * Machine-check aware variant of copy_user_highpage(): copies @from to @to
 * via copy_mc_to_kernel() so that consuming hardware poison in the source
 * page is reported to the caller instead of crashing the kernel.
 *
 * Returns 0 on success, or the (nonzero) number of bytes left uncopied
 * when an uncorrectable error was hit in the source page.
 */
static inline int copy_mc_user_highpage(struct page *to, struct page *from,
					unsigned long vaddr, struct vm_area_struct *vma)
{
	unsigned long bytes_left;
	char *src, *dst;

	src = kmap_local_page(from);
	dst = kmap_local_page(to);

	bytes_left = copy_mc_to_kernel(dst, src, PAGE_SIZE);
	if (!bytes_left)
		kmsan_unpoison_memory(page_address(to), PAGE_SIZE);

	/* kmap_local mappings are unmapped in reverse (LIFO) order. */
	kunmap_local(dst);
	kunmap_local(src);

	return bytes_left;
}
#else
/*
 * Fallback for architectures without a machine-check safe copy routine:
 * perform a plain copy and report unconditional success.
 */
static inline int copy_mc_user_highpage(struct page *to, struct page *from,
					unsigned long vaddr, struct vm_area_struct *vma)
{
	copy_user_highpage(to, from, vaddr, vma);
	return 0;
}
#endif
#ifndef __HAVE_ARCH_COPY_HIGHPAGE #ifndef __HAVE_ARCH_COPY_HIGHPAGE
static inline void copy_highpage(struct page *to, struct page *from) static inline void copy_highpage(struct page *to, struct page *from)
......
...@@ -2798,10 +2798,16 @@ static inline int pte_unmap_same(struct vm_fault *vmf) ...@@ -2798,10 +2798,16 @@ static inline int pte_unmap_same(struct vm_fault *vmf)
return same; return same;
} }
static inline bool __wp_page_copy_user(struct page *dst, struct page *src, /*
* Return:
* 0: copied succeeded
* -EHWPOISON: copy failed due to hwpoison in source page
* -EAGAIN: copied failed (some other reason)
*/
static inline int __wp_page_copy_user(struct page *dst, struct page *src,
struct vm_fault *vmf) struct vm_fault *vmf)
{ {
bool ret; int ret;
void *kaddr; void *kaddr;
void __user *uaddr; void __user *uaddr;
bool locked = false; bool locked = false;
...@@ -2810,8 +2816,9 @@ static inline bool __wp_page_copy_user(struct page *dst, struct page *src, ...@@ -2810,8 +2816,9 @@ static inline bool __wp_page_copy_user(struct page *dst, struct page *src,
unsigned long addr = vmf->address; unsigned long addr = vmf->address;
if (likely(src)) { if (likely(src)) {
copy_user_highpage(dst, src, addr, vma); if (copy_mc_user_highpage(dst, src, addr, vma))
return true; return -EHWPOISON;
return 0;
} }
/* /*
...@@ -2838,7 +2845,7 @@ static inline bool __wp_page_copy_user(struct page *dst, struct page *src, ...@@ -2838,7 +2845,7 @@ static inline bool __wp_page_copy_user(struct page *dst, struct page *src,
* and update local tlb only * and update local tlb only
*/ */
update_mmu_tlb(vma, addr, vmf->pte); update_mmu_tlb(vma, addr, vmf->pte);
ret = false; ret = -EAGAIN;
goto pte_unlock; goto pte_unlock;
} }
...@@ -2863,7 +2870,7 @@ static inline bool __wp_page_copy_user(struct page *dst, struct page *src, ...@@ -2863,7 +2870,7 @@ static inline bool __wp_page_copy_user(struct page *dst, struct page *src,
if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) { if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
/* The PTE changed under us, update local tlb */ /* The PTE changed under us, update local tlb */
update_mmu_tlb(vma, addr, vmf->pte); update_mmu_tlb(vma, addr, vmf->pte);
ret = false; ret = -EAGAIN;
goto pte_unlock; goto pte_unlock;
} }
...@@ -2882,7 +2889,7 @@ static inline bool __wp_page_copy_user(struct page *dst, struct page *src, ...@@ -2882,7 +2889,7 @@ static inline bool __wp_page_copy_user(struct page *dst, struct page *src,
} }
} }
ret = true; ret = 0;
pte_unlock: pte_unlock:
if (locked) if (locked)
...@@ -3054,6 +3061,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf) ...@@ -3054,6 +3061,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
pte_t entry; pte_t entry;
int page_copied = 0; int page_copied = 0;
struct mmu_notifier_range range; struct mmu_notifier_range range;
int ret;
delayacct_wpcopy_start(); delayacct_wpcopy_start();
...@@ -3071,19 +3079,21 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf) ...@@ -3071,19 +3079,21 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
if (!new_page) if (!new_page)
goto oom; goto oom;
if (!__wp_page_copy_user(new_page, old_page, vmf)) { ret = __wp_page_copy_user(new_page, old_page, vmf);
if (ret) {
/* /*
* COW failed, if the fault was solved by other, * COW failed, if the fault was solved by other,
* it's fine. If not, userspace would re-fault on * it's fine. If not, userspace would re-fault on
* the same address and we will handle the fault * the same address and we will handle the fault
* from the second attempt. * from the second attempt.
* The -EHWPOISON case will not be retried.
*/ */
put_page(new_page); put_page(new_page);
if (old_page) if (old_page)
put_page(old_page); put_page(old_page);
delayacct_wpcopy_end(); delayacct_wpcopy_end();
return 0; return ret == -EHWPOISON ? VM_FAULT_HWPOISON : 0;
} }
kmsan_copy_page_meta(new_page, old_page); kmsan_copy_page_meta(new_page, old_page);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment