Commit 1cb9dc4b authored by Liu Shixin, committed by Andrew Morton

mm: hwpoison: support recovery from HugePage copy-on-write faults

Copy-on-write of hugetlb user pages with uncorrectable errors will result
in a kernel crash.  This is because the copy is performed in kernel mode
and in general we cannot handle accessing memory with such errors while in
kernel mode.  Commit a873dfe1 ("mm, hwpoison: try to recover from
copy-on write faults") introduced the routine copy_mc_user_highpage() to
gracefully handle copying of user pages with uncorrectable errors.
However, the separate hugetlb copy-on-write code paths were not modified
as part of commit a873dfe1.

Modify the hugetlb copy-on-write code paths to use copy_mc_user_highpage()
so that they can also gracefully handle uncorrectable errors in user
pages.  This involves changing the hugetlb-specific routine
copy_user_large_folio() to return int instead of void so that it can
report an error.  Modify the hugetlb userfaultfd code in the same way so
that it can return -EHWPOISON if it encounters an uncorrectable error.

Link: https://lkml.kernel.org/r/20230413131349.2524210-1-liushixin2@huawei.com
Signed-off-by: Liu Shixin <liushixin2@huawei.com>
Acked-by: Mike Kravetz <mike.kravetz@oracle.com>
Reviewed-by: Naoya Horiguchi <naoya.horiguchi@nec.com>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Tony Luck <tony.luck@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent ec342603
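
For illustration, the recovery pattern the change relies on can be modeled
outside the kernel: the per-subpage copy reports failure instead of letting a
kernel-mode access to poisoned memory take the machine down, and the
folio-level copy propagates that failure so the caller can back out.  The
sketch below is a minimal userspace model only; copy_one_subpage(),
copy_large_folio(), SUBPAGE_SIZE and the hard-coded "poisoned" index are
hypothetical stand-ins, not the kernel API (the fallback EHWPOISON value
matches the Linux errno definition).

/*
 * Illustrative, userspace-only model of the recovery pattern: the
 * per-subpage copy can fail instead of crashing, and the folio-level
 * copy stops at the first poisoned subpage so the caller can unwind.
 * Names and the hard-coded "poisoned" index are hypothetical.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>

#define SUBPAGE_SIZE 4096

#ifndef EHWPOISON
#define EHWPOISON 133		/* Linux "Memory page has hardware error" */
#endif

/* Stand-in for a machine-check-safe copy: pretend subpage 2 is poisoned. */
static int copy_one_subpage(char *dst, const char *src, int idx)
{
	if (idx == 2)
		return -EHWPOISON;
	memcpy(dst + (size_t)idx * SUBPAGE_SIZE,
	       src + (size_t)idx * SUBPAGE_SIZE, SUBPAGE_SIZE);
	return 0;
}

/* Mirrors the new int-returning copy_user_large_folio() contract. */
static int copy_large_folio(char *dst, const char *src, int nr_subpages)
{
	for (int i = 0; i < nr_subpages; i++) {
		int ret = copy_one_subpage(dst, src, i);

		if (ret)
			return ret;	/* caller frees dst and fails the operation */
	}
	return 0;
}

int main(void)
{
	static char src[8 * SUBPAGE_SIZE], dst[8 * SUBPAGE_SIZE];

	if (copy_large_folio(dst, src, 8) == -EHWPOISON)
		fprintf(stderr, "copy aborted: source subpage is hwpoisoned\n");
	return 0;
}

In the patch itself, copy_mc_user_highpage() plays the role of the per-subpage
copy, memory_failure_queue() is notified of the poisoned pfn, and the hugetlb
callers (copy_hugetlb_page_range(), hugetlb_wp() and
hugetlb_mfill_atomic_pte()) free the destination folio and fail the operation
instead of crashing, as the diff below shows.
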
@@ -3693,9 +3693,9 @@ extern const struct attribute_group memory_failure_attr_group;
 extern void clear_huge_page(struct page *page,
			    unsigned long addr_hint,
			    unsigned int pages_per_huge_page);
-void copy_user_large_folio(struct folio *dst, struct folio *src,
-			   unsigned long addr_hint,
-			   struct vm_area_struct *vma);
+int copy_user_large_folio(struct folio *dst, struct folio *src,
+			  unsigned long addr_hint,
+			  struct vm_area_struct *vma);
 long copy_folio_from_user(struct folio *dst_folio,
			   const void __user *usr_src,
			   bool allow_pagefault);
@@ -5097,10 +5097,14 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 				ret = PTR_ERR(new_folio);
 				break;
 			}
-			copy_user_large_folio(new_folio,
+			ret = copy_user_large_folio(new_folio,
					      page_folio(ptepage),
					      addr, dst_vma);
 			put_page(ptepage);
+			if (ret) {
+				folio_put(new_folio);
+				break;
+			}
 
 			/* Install the new hugetlb folio if src pte stable */
 			dst_ptl = huge_pte_lock(h, dst, dst_pte);
@@ -5617,7 +5621,10 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
 		goto out_release_all;
 	}
 
-	copy_user_large_folio(new_folio, page_folio(old_page), address, vma);
+	if (copy_user_large_folio(new_folio, page_folio(old_page), address, vma)) {
+		ret = VM_FAULT_HWPOISON_LARGE;
+		goto out_release_all;
+	}
 	__folio_mark_uptodate(new_folio);
 
 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, haddr,
@@ -6260,9 +6267,13 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
 			*foliop = NULL;
 			goto out;
 		}
-		copy_user_large_folio(folio, *foliop, dst_addr, dst_vma);
+		ret = copy_user_large_folio(folio, *foliop, dst_addr, dst_vma);
 		folio_put(*foliop);
 		*foliop = NULL;
+		if (ret) {
+			folio_put(folio);
+			goto out;
+		}
 	}
 
 	/*
@@ -5733,12 +5733,12 @@ EXPORT_SYMBOL(__might_fault);
  * operation. The target subpage will be processed last to keep its
  * cache lines hot.
  */
-static inline void process_huge_page(
+static inline int process_huge_page(
	unsigned long addr_hint, unsigned int pages_per_huge_page,
-	void (*process_subpage)(unsigned long addr, int idx, void *arg),
+	int (*process_subpage)(unsigned long addr, int idx, void *arg),
	void *arg)
 {
-	int i, n, base, l;
+	int i, n, base, l, ret;
	unsigned long addr = addr_hint &
		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
 
@@ -5752,7 +5752,9 @@ static inline void process_huge_page(
		/* Process subpages at the end of huge page */
		for (i = pages_per_huge_page - 1; i >= 2 * n; i--) {
			cond_resched();
-			process_subpage(addr + i * PAGE_SIZE, i, arg);
+			ret = process_subpage(addr + i * PAGE_SIZE, i, arg);
+			if (ret)
+				return ret;
		}
	} else {
		/* If target subpage in second half of huge page */
@@ -5761,7 +5763,9 @@ static inline void process_huge_page(
		/* Process subpages at the begin of huge page */
		for (i = 0; i < base; i++) {
			cond_resched();
-			process_subpage(addr + i * PAGE_SIZE, i, arg);
+			ret = process_subpage(addr + i * PAGE_SIZE, i, arg);
+			if (ret)
+				return ret;
		}
	}
	/*
@@ -5773,10 +5777,15 @@ static inline void process_huge_page(
		int right_idx = base + 2 * l - 1 - i;
 
		cond_resched();
-		process_subpage(addr + left_idx * PAGE_SIZE, left_idx, arg);
+		ret = process_subpage(addr + left_idx * PAGE_SIZE, left_idx, arg);
+		if (ret)
+			return ret;
		cond_resched();
-		process_subpage(addr + right_idx * PAGE_SIZE, right_idx, arg);
+		ret = process_subpage(addr + right_idx * PAGE_SIZE, right_idx, arg);
+		if (ret)
+			return ret;
	}
+	return 0;
 }
 
 static void clear_gigantic_page(struct page *page,
@@ -5794,11 +5803,12 @@ static void clear_gigantic_page(struct page *page,
	}
 }
 
-static void clear_subpage(unsigned long addr, int idx, void *arg)
+static int clear_subpage(unsigned long addr, int idx, void *arg)
 {
	struct page *page = arg;
 
	clear_user_highpage(page + idx, addr);
+	return 0;
 }
 
 void clear_huge_page(struct page *page,
@@ -5815,7 +5825,7 @@ void clear_huge_page(struct page *page,
	process_huge_page(addr_hint, pages_per_huge_page, clear_subpage, page);
 }
 
-static void copy_user_gigantic_page(struct folio *dst, struct folio *src,
+static int copy_user_gigantic_page(struct folio *dst, struct folio *src,
				    unsigned long addr,
				    struct vm_area_struct *vma,
				    unsigned int pages_per_huge_page)
@@ -5829,8 +5839,13 @@ static void copy_user_gigantic_page(struct folio *dst, struct folio *src,
		src_page = folio_page(src, i);
 
		cond_resched();
-		copy_user_highpage(dst_page, src_page, addr + i*PAGE_SIZE, vma);
+		if (copy_mc_user_highpage(dst_page, src_page,
+					  addr + i*PAGE_SIZE, vma)) {
+			memory_failure_queue(page_to_pfn(src_page), 0);
+			return -EHWPOISON;
+		}
	}
+	return 0;
 }
 
 struct copy_subpage_arg {
@@ -5839,16 +5854,20 @@ struct copy_subpage_arg {
	struct vm_area_struct *vma;
 };
 
-static void copy_subpage(unsigned long addr, int idx, void *arg)
+static int copy_subpage(unsigned long addr, int idx, void *arg)
 {
	struct copy_subpage_arg *copy_arg = arg;
 
-	copy_user_highpage(copy_arg->dst + idx, copy_arg->src + idx,
-			   addr, copy_arg->vma);
+	if (copy_mc_user_highpage(copy_arg->dst + idx, copy_arg->src + idx,
+				  addr, copy_arg->vma)) {
+		memory_failure_queue(page_to_pfn(copy_arg->src + idx), 0);
+		return -EHWPOISON;
+	}
+	return 0;
 }
 
-void copy_user_large_folio(struct folio *dst, struct folio *src,
-			   unsigned long addr_hint, struct vm_area_struct *vma)
+int copy_user_large_folio(struct folio *dst, struct folio *src,
+			  unsigned long addr_hint, struct vm_area_struct *vma)
 {
	unsigned int pages_per_huge_page = folio_nr_pages(dst);
	unsigned long addr = addr_hint &
@@ -5859,13 +5878,11 @@ void copy_user_large_folio(struct folio *dst, struct folio *src,
		.vma = vma,
	};
 
-	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
-		copy_user_gigantic_page(dst, src, addr, vma,
-					pages_per_huge_page);
-		return;
-	}
+	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES))
+		return copy_user_gigantic_page(dst, src, addr, vma,
+					       pages_per_huge_page);
 
-	process_huge_page(addr_hint, pages_per_huge_page, copy_subpage, &arg);
+	return process_huge_page(addr_hint, pages_per_huge_page, copy_subpage, &arg);
 }
 
 long copy_folio_from_user(struct folio *dst_folio,