Commit 12904d95 authored by Jiaqi Yan, committed by Andrew Morton

mm/khugepaged: recover from poisoned file-backed memory

Make collapse_file() roll back when copying pages fails (a condensed
sketch of the new copy step follows this list). More concretely:
- extract copying operations into a separate loop
- postpone the updates for nr_none until both scanning and copying
  succeeded
- postpone joining small xarray entries until both scanning and copying
  succeeded
- postpone the update operations to NR_XXX_THPS until both scanning and
  copying succeeded
- for non-SHMEM files, roll back filemap_nr_thps_inc if scanning
  succeeded but copying failed
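
As a condensed sketch, the copy step introduced by this patch boils
down to the loop below (taken from the diff that follows; locking and
the rollback path are elided here):

	/*
	 * Copy each old subpage into the huge page with the
	 * machine-check-safe helper. On a copy failure, stop and record
	 * SCAN_COPY_MC so the caller rolls back the page cache instead
	 * of freeing the old pages.
	 */
	index = start;
	list_for_each_entry(page, &pagelist, lru) {
		while (index < page->index) {
			clear_highpage(hpage + (index % HPAGE_PMD_NR));
			index++;
		}
		if (copy_mc_highpage(hpage + (page->index % HPAGE_PMD_NR),
				     page) > 0) {
			result = SCAN_COPY_MC;
			break;
		}
		index++;
	}
	/* Tail subpages are cleared only if every copy succeeded. */
	while (result == SCAN_SUCCEED && index < end) {
		clear_highpage(hpage + (index % HPAGE_PMD_NR));
		index++;
	}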

Tested manually:
0. Enable khugepaged on the system under test. Mount tmpfs at
   /mnt/ramdisk.
1. Start a two-thread application. Each thread allocates a non-huge
   memory buffer from /mnt/ramdisk.
2. Pick 4 random buffer addresses (2 in each thread) and inject
   uncorrectable memory errors at the corresponding physical addresses.
3. Signal both threads to make their memory buffers collapsible, i.e.
   by calling madvise(MADV_HUGEPAGE); a sketch of this step follows
   these steps.
4. Wait and then check the kernel log: khugepaged is able to recover
   from the poisoned pages by skipping them.
5. Signal both threads to inspect their buffer contents and verify that
   no data was corrupted.
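
For illustration, a minimal userspace sketch of steps 1 and 3, assuming
a single PMD-sized (2MB) buffer backed by a file on the tmpfs mount
(the file name here is hypothetical, and the error injection of step 2
is platform-specific and not shown):

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/mman.h>
	#include <unistd.h>

	#define BUF_SZ (2UL << 20)	/* one PMD-sized (2MB) chunk */

	int main(void)
	{
		/* Step 1: allocate a non-huge buffer from /mnt/ramdisk. */
		int fd = open("/mnt/ramdisk/buf", O_CREAT | O_RDWR, 0600);
		if (fd < 0 || ftruncate(fd, BUF_SZ) < 0)
			return 1;
		char *buf = mmap(NULL, BUF_SZ, PROT_READ | PROT_WRITE,
				 MAP_SHARED, fd, 0);
		if (buf == MAP_FAILED)
			return 1;
		for (unsigned long i = 0; i < BUF_SZ; i++)
			buf[i] = i & 0xff;	/* fault in small pages */

		/* Step 3: ask khugepaged to collapse the buffer. */
		if (madvise(buf, BUF_SZ, MADV_HUGEPAGE) < 0)
			perror("madvise");

		pause();	/* step 5: wake up later and inspect buf */
		return 0;
	}

Note that khugepaged only collapses PMD-aligned 2MB extents, so a real
test would over-allocate and align the buffer address accordingly.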

Link: https://lkml.kernel.org/r/20230329151121.949896-4-jiaqiyan@google.com
Signed-off-by: Jiaqi Yan <jiaqiyan@google.com>
Reviewed-by: Yang Shi <shy828301@gmail.com>
Acked-by: Hugh Dickins <hughd@google.com>
Cc: David Stevens <stevensd@chromium.org>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Naoya Horiguchi <naoya.horiguchi@nec.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Tong Tiangen <tongtiangen@huawei.com>
Cc: Tony Luck <tony.luck@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 6efc7afb
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1877,6 +1877,9 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
 {
 	struct address_space *mapping = file->f_mapping;
 	struct page *hpage;
+	struct page *page;
+	struct page *tmp;
+	struct folio *folio;
 	pgoff_t index = 0, end = start + HPAGE_PMD_NR;
 	LIST_HEAD(pagelist);
 	XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
@@ -1921,8 +1924,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
 
 	xas_set(&xas, start);
 	for (index = start; index < end; index++) {
-		struct page *page = xas_next(&xas);
-		struct folio *folio;
+		page = xas_next(&xas);
 
 		VM_BUG_ON(index != xas.xa_index);
 		if (is_shmem) {
@@ -2116,12 +2118,8 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
 		put_page(page);
 		goto xa_unlocked;
 	}
-	nr = thp_nr_pages(hpage);
 
-	if (is_shmem)
-		__mod_lruvec_page_state(hpage, NR_SHMEM_THPS, nr);
-	else {
-		__mod_lruvec_page_state(hpage, NR_FILE_THPS, nr);
+	if (!is_shmem) {
 		filemap_nr_thps_inc(mapping);
 		/*
 		 * Paired with smp_mb() in do_dentry_open() to ensure
@@ -2132,21 +2130,10 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
 		smp_mb();
 		if (inode_is_open_for_write(mapping->host)) {
 			result = SCAN_FAIL;
-			__mod_lruvec_page_state(hpage, NR_FILE_THPS, -nr);
 			filemap_nr_thps_dec(mapping);
-			goto xa_locked;
 		}
 	}
 
-	if (nr_none) {
-		__mod_lruvec_page_state(hpage, NR_FILE_PAGES, nr_none);
-		/* nr_none is always 0 for non-shmem. */
-		__mod_lruvec_page_state(hpage, NR_SHMEM, nr_none);
-	}
-
-	/* Join all the small entries into a single multi-index entry */
-	xas_set_order(&xas, start, HPAGE_PMD_ORDER);
-	xas_store(&xas, hpage);
 	/* Here we can't get an ENOMEM (because entries were
 	 * previously allocated) But let's check for errors
 	 * (XArray implementation can be changed in the future)
@@ -2164,21 +2151,36 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
 	try_to_unmap_flush();
 
 	if (result == SCAN_SUCCEED) {
-		struct page *page, *tmp;
-		struct folio *folio;
-
 		/*
 		 * Replacing old pages with new one has succeeded, now we
-		 * need to copy the content and free the old pages.
+		 * attempt to copy the contents.
 		 */
 		index = start;
-		list_for_each_entry_safe(page, tmp, &pagelist, lru) {
+		list_for_each_entry(page, &pagelist, lru) {
 			while (index < page->index) {
 				clear_highpage(hpage + (index % HPAGE_PMD_NR));
 				index++;
 			}
-			copy_highpage(hpage + (page->index % HPAGE_PMD_NR),
-				      page);
+			if (copy_mc_highpage(hpage + (page->index % HPAGE_PMD_NR),
+					     page) > 0) {
+				result = SCAN_COPY_MC;
+				break;
+			}
+			index++;
+		}
+		while (result == SCAN_SUCCEED && index < end) {
+			clear_highpage(hpage + (index % HPAGE_PMD_NR));
+			index++;
+		}
+	}
+
+	nr = thp_nr_pages(hpage);
+	if (result == SCAN_SUCCEED) {
+		/*
+		 * Copying old pages to huge one has succeeded, now we
+		 * need to free the old pages.
+		 */
+		list_for_each_entry_safe(page, tmp, &pagelist, lru) {
 			list_del(&page->lru);
 			page->mapping = NULL;
 			page_ref_unfreeze(page, 1);
@@ -2186,12 +2188,23 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
 			ClearPageUnevictable(page);
 			unlock_page(page);
 			put_page(page);
-			index++;
 		}
-		while (index < end) {
-			clear_highpage(hpage + (index % HPAGE_PMD_NR));
-			index++;
-		}
+
+		xas_lock_irq(&xas);
+		if (is_shmem)
+			__mod_lruvec_page_state(hpage, NR_SHMEM_THPS, nr);
+		else
+			__mod_lruvec_page_state(hpage, NR_FILE_THPS, nr);
+
+		if (nr_none) {
+			__mod_lruvec_page_state(hpage, NR_FILE_PAGES, nr_none);
+			/* nr_none is always 0 for non-shmem. */
+			__mod_lruvec_page_state(hpage, NR_SHMEM, nr_none);
+		}
+		/* Join all the small entries into a single multi-index entry. */
+		xas_set_order(&xas, start, HPAGE_PMD_ORDER);
+		xas_store(&xas, hpage);
+		xas_unlock_irq(&xas);
 
 		folio = page_folio(hpage);
 		folio_mark_uptodate(folio);
@@ -2209,8 +2222,6 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
 		unlock_page(hpage);
 		hpage = NULL;
 	} else {
-		struct page *page;
-
 		/* Something went wrong: roll back page cache changes */
 		xas_lock_irq(&xas);
 		if (nr_none) {
@@ -2244,6 +2255,20 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
 			xas_lock_irq(&xas);
 		}
 		VM_BUG_ON(nr_none);
+		/*
+		 * Undo the updates of filemap_nr_thps_inc for non-SHMEM
+		 * file only. This undo is not needed unless failure is
+		 * due to SCAN_COPY_MC.
+		 */
+		if (!is_shmem && result == SCAN_COPY_MC) {
+			filemap_nr_thps_dec(mapping);
+			/*
+			 * Paired with smp_mb() in do_dentry_open() to
+			 * ensure the update to nr_thps is visible.
+			 */
+			smp_mb();
+		}
+
 		xas_unlock_irq(&xas);
 
 		hpage->mapping = NULL;