Commit cae106dd authored by David Stevens, committed by Andrew Morton

mm/khugepaged: refactor collapse_file control flow

Add a rollback label to handle failure, instead of continuously checking
for SCAN_SUCCEED, to make it easier to add more failure cases.  The
refactoring also allows the collapse_file tracepoint to include hpage on
success (instead of NULL).
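
For context, the control-flow pattern the refactor moves to looks like the
minimal userspace sketch below.  This is an illustration only, not kernel
code: reserve_slots(), copy_pages(), and undo_reserve() are hypothetical
stand-ins for the page cache operations in collapse_file().  Each failure
site jumps to a single rollback label instead of every later step
re-checking the result value:

#include <stdio.h>
#include <stdbool.h>

enum scan_result { SCAN_SUCCEED, SCAN_FAIL };

/* Hypothetical stand-ins for the page cache steps in collapse_file(). */
static bool reserve_slots(void) { return true; }
static bool copy_pages(void)    { return true; }
static void undo_reserve(void)  { puts("rolled back reservation"); }

static enum scan_result collapse(void)
{
	enum scan_result result = SCAN_SUCCEED;

	if (!reserve_slots()) {
		result = SCAN_FAIL;
		goto out;	/* nothing to undo yet */
	}

	if (!copy_pages()) {
		result = SCAN_FAIL;
		goto rollback;	/* undo all prior steps in one place */
	}

	/* Success: skip the rollback path entirely. */
	goto out;

rollback:
	undo_reserve();
out:
	return result;
}

int main(void)
{
	return collapse() == SCAN_SUCCEED ? 0 : 1;
}

With this shape, a new failure case only needs a goto rollback, and the
success path reaches the end of the function with hpage still valid.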

Link: https://lkml.kernel.org/r/20230404120117.2562166-3-stevensd@google.com
Signed-off-by: David Stevens <stevensd@chromium.org>
Acked-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Yang Shi <shy828301@gmail.com>
Acked-by: Hugh Dickins <hughd@google.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Jiaqi Yan <jiaqiyan@google.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent efa3d814
@@ -1894,6 +1894,12 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
 	if (result != SCAN_SUCCEED)
 		goto out;
 
+	__SetPageLocked(hpage);
+	if (is_shmem)
+		__SetPageSwapBacked(hpage);
+	hpage->index = start;
+	hpage->mapping = mapping;
+
 	/*
 	 * Ensure we have slots for all the pages in the range.  This is
 	 * almost certainly a no-op because most of the pages must be present
@@ -1906,16 +1912,10 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
 		xas_unlock_irq(&xas);
 		if (!xas_nomem(&xas, GFP_KERNEL)) {
 			result = SCAN_FAIL;
-			goto out;
+			goto rollback;
 		}
 	} while (1);
 
-	__SetPageLocked(hpage);
-	if (is_shmem)
-		__SetPageSwapBacked(hpage);
-	hpage->index = start;
-	hpage->mapping = mapping;
-
 	/*
 	 * At this point the hpage is locked and not up-to-date.
 	 * It's safe to insert it into the page cache, because nobody would
@@ -2152,7 +2152,9 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
 	 */
 	try_to_unmap_flush();
 
-	if (result == SCAN_SUCCEED) {
+	if (result != SCAN_SUCCEED)
+		goto rollback;
+
 	/*
 	 * Replacing old pages with new one has succeeded, now we
 	 * attempt to copy the contents.
@@ -2163,21 +2165,17 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
 			clear_highpage(hpage + (index % HPAGE_PMD_NR));
 			index++;
 		}
-		if (copy_mc_highpage(hpage + (page->index % HPAGE_PMD_NR),
-				     page) > 0) {
+		if (copy_mc_highpage(hpage + (page->index % HPAGE_PMD_NR), page) > 0) {
 			result = SCAN_COPY_MC;
-			break;
+			goto rollback;
 		}
 		index++;
 	}
-	while (result == SCAN_SUCCEED && index < end) {
+	while (index < end) {
 		clear_highpage(hpage + (index % HPAGE_PMD_NR));
 		index++;
 	}
-	}
-
-	nr = thp_nr_pages(hpage);
-	if (result == SCAN_SUCCEED) {
+
 	/*
 	 * Copying old pages to huge one has succeeded, now we
 	 * need to free the old pages.
@@ -2192,6 +2190,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
 		put_page(page);
 	}
 
+	nr = thp_nr_pages(hpage);
 	xas_lock_irq(&xas);
 	if (is_shmem)
 		__mod_lruvec_page_state(hpage, NR_SHMEM_THPS, nr);
@@ -2222,8 +2221,9 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
 	result = retract_page_tables(mapping, start, mm, addr, hpage,
 				     cc);
 	unlock_page(hpage);
-	hpage = NULL;
-} else {
+	goto out;
+
+rollback:
 	/* Something went wrong: roll back page cache changes */
 	xas_lock_irq(&xas);
 	if (nr_none) {
@@ -2274,15 +2274,11 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
 	xas_unlock_irq(&xas);
 
 	hpage->mapping = NULL;
-	}
-
-	if (hpage)
+
 	unlock_page(hpage);
+	put_page(hpage);
 out:
 	VM_BUG_ON(!list_empty(&pagelist));
-	if (hpage)
-		put_page(hpage);
-
 	trace_mm_khugepaged_collapse_file(mm, hpage, index, is_shmem, addr, file, nr, result);
 	return result;
 }