Commit cae106dd authored by David Stevens, committed by Andrew Morton

mm/khugepaged: refactor collapse_file control flow

Add a rollback label to deal with failure, instead of continuously
checking for SCAN_SUCCEED, to make it easier to add more failure cases.
The refactoring also allows the collapse_file tracepoint to include hpage
on success (instead of NULL).

Link: https://lkml.kernel.org/r/20230404120117.2562166-3-stevensd@google.com
Signed-off-by: David Stevens <stevensd@chromium.org>
Acked-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Yang Shi <shy828301@gmail.com>
Acked-by: Hugh Dickins <hughd@google.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Jiaqi Yan <jiaqiyan@google.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent efa3d814
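To make the control-flow change easier to see before reading the diff, here is a minimal, self-contained C sketch of the same cleanup-label idiom (illustrative only; collapse_sketch() and its two allocations are invented stand-ins, not the kernel code): every later failure site shares one rollback: unwind path, while the success path jumps straight to out: and, as in this patch, no longer clears hpage before the final trace/return.

    /*
     * Illustrative sketch only -- standalone C, not kernel code.
     * collapse_sketch() and its allocations are invented stand-ins.
     */
    #include <stdio.h>
    #include <stdlib.h>

    enum scan_result { SCAN_SUCCEED, SCAN_FAIL };

    static enum scan_result collapse_sketch(void)
    {
            enum scan_result result = SCAN_SUCCEED;
            char *hpage = malloc(16);   /* stand-in for the huge page */
            char *slots = NULL;         /* stand-in for the xarray slots */

            if (!hpage) {
                    result = SCAN_FAIL;
                    goto out;           /* nothing to roll back yet */
            }

            slots = malloc(16);
            if (!slots) {
                    result = SCAN_FAIL;
                    goto rollback;      /* hpage exists: unwind it */
            }

            /* ...copy step: one more failure case is one more "goto rollback"... */

            free(slots);                /* success: commit the work... */
            goto out;                   /* ...and skip the rollback path */

    rollback:
            free(slots);
            free(hpage);
            hpage = NULL;
    out:
            /* on success hpage is still non-NULL, as with the tracepoint */
            printf("result=%d hpage=%p\n", result, (void *)hpage);
            free(hpage);
            return result;
    }

    int main(void)
    {
            return collapse_sketch() == SCAN_SUCCEED ? 0 : 1;
    }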
@@ -1894,6 +1894,12 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
 	if (result != SCAN_SUCCEED)
 		goto out;
 
+	__SetPageLocked(hpage);
+	if (is_shmem)
+		__SetPageSwapBacked(hpage);
+	hpage->index = start;
+	hpage->mapping = mapping;
+
 	/*
 	 * Ensure we have slots for all the pages in the range.  This is
 	 * almost certainly a no-op because most of the pages must be present
@@ -1906,16 +1912,10 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
 		xas_unlock_irq(&xas);
 		if (!xas_nomem(&xas, GFP_KERNEL)) {
 			result = SCAN_FAIL;
-			goto out;
+			goto rollback;
 		}
 	} while (1);
 
-	__SetPageLocked(hpage);
-	if (is_shmem)
-		__SetPageSwapBacked(hpage);
-	hpage->index = start;
-	hpage->mapping = mapping;
-
 	/*
 	 * At this point the hpage is locked and not up-to-date.
 	 * It's safe to insert it into the page cache, because nobody would
@@ -2152,137 +2152,133 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
 	 */
 	try_to_unmap_flush();
 
-	if (result == SCAN_SUCCEED) {
-		/*
-		 * Replacing old pages with new one has succeeded, now we
-		 * attempt to copy the contents.
-		 */
-		index = start;
-		list_for_each_entry(page, &pagelist, lru) {
-			while (index < page->index) {
-				clear_highpage(hpage + (index % HPAGE_PMD_NR));
-				index++;
-			}
-			if (copy_mc_highpage(hpage + (page->index % HPAGE_PMD_NR),
-					     page) > 0) {
-				result = SCAN_COPY_MC;
-				break;
-			}
-			index++;
-		}
-		while (result == SCAN_SUCCEED && index < end) {
+	if (result != SCAN_SUCCEED)
+		goto rollback;
+
+	/*
+	 * Replacing old pages with new one has succeeded, now we
+	 * attempt to copy the contents.
+	 */
+	index = start;
+	list_for_each_entry(page, &pagelist, lru) {
+		while (index < page->index) {
 			clear_highpage(hpage + (index % HPAGE_PMD_NR));
 			index++;
 		}
+		if (copy_mc_highpage(hpage + (page->index % HPAGE_PMD_NR), page) > 0) {
+			result = SCAN_COPY_MC;
+			goto rollback;
+		}
+		index++;
+	}
+	while (index < end) {
+		clear_highpage(hpage + (index % HPAGE_PMD_NR));
+		index++;
 	}
 
+	/*
+	 * Copying old pages to huge one has succeeded, now we
+	 * need to free the old pages.
+	 */
+	list_for_each_entry_safe(page, tmp, &pagelist, lru) {
+		list_del(&page->lru);
+		page->mapping = NULL;
+		page_ref_unfreeze(page, 1);
+		ClearPageActive(page);
+		ClearPageUnevictable(page);
+		unlock_page(page);
+		put_page(page);
+	}
+
 	nr = thp_nr_pages(hpage);
-	if (result == SCAN_SUCCEED) {
-		/*
-		 * Copying old pages to huge one has succeeded, now we
-		 * need to free the old pages.
-		 */
-		list_for_each_entry_safe(page, tmp, &pagelist, lru) {
-			list_del(&page->lru);
-			page->mapping = NULL;
-			page_ref_unfreeze(page, 1);
-			ClearPageActive(page);
-			ClearPageUnevictable(page);
-			unlock_page(page);
-			put_page(page);
-		}
-
-		xas_lock_irq(&xas);
-		if (is_shmem)
-			__mod_lruvec_page_state(hpage, NR_SHMEM_THPS, nr);
-		else
-			__mod_lruvec_page_state(hpage, NR_FILE_THPS, nr);
+	xas_lock_irq(&xas);
+	if (is_shmem)
+		__mod_lruvec_page_state(hpage, NR_SHMEM_THPS, nr);
+	else
+		__mod_lruvec_page_state(hpage, NR_FILE_THPS, nr);
 
-		if (nr_none) {
-			__mod_lruvec_page_state(hpage, NR_FILE_PAGES, nr_none);
-			/* nr_none is always 0 for non-shmem. */
-			__mod_lruvec_page_state(hpage, NR_SHMEM, nr_none);
-		}
-		/* Join all the small entries into a single multi-index entry. */
-		xas_set_order(&xas, start, HPAGE_PMD_ORDER);
-		xas_store(&xas, hpage);
-		xas_unlock_irq(&xas);
+	if (nr_none) {
+		__mod_lruvec_page_state(hpage, NR_FILE_PAGES, nr_none);
+		/* nr_none is always 0 for non-shmem. */
+		__mod_lruvec_page_state(hpage, NR_SHMEM, nr_none);
+	}
+	/* Join all the small entries into a single multi-index entry. */
+	xas_set_order(&xas, start, HPAGE_PMD_ORDER);
+	xas_store(&xas, hpage);
+	xas_unlock_irq(&xas);
 
-		folio = page_folio(hpage);
-		folio_mark_uptodate(folio);
-		folio_ref_add(folio, HPAGE_PMD_NR - 1);
+	folio = page_folio(hpage);
+	folio_mark_uptodate(folio);
+	folio_ref_add(folio, HPAGE_PMD_NR - 1);
 
-		if (is_shmem)
-			folio_mark_dirty(folio);
-		folio_add_lru(folio);
+	if (is_shmem)
+		folio_mark_dirty(folio);
+	folio_add_lru(folio);
 
-		/*
-		 * Remove pte page tables, so we can re-fault the page as huge.
-		 */
-		result = retract_page_tables(mapping, start, mm, addr, hpage,
-					     cc);
-		unlock_page(hpage);
-		hpage = NULL;
-	} else {
-		/* Something went wrong: roll back page cache changes */
-		xas_lock_irq(&xas);
-		if (nr_none) {
-			mapping->nrpages -= nr_none;
-			shmem_uncharge(mapping->host, nr_none);
-		}
+	/*
+	 * Remove pte page tables, so we can re-fault the page as huge.
+	 */
+	result = retract_page_tables(mapping, start, mm, addr, hpage,
+				     cc);
+	unlock_page(hpage);
+	goto out;
+
+rollback:
+	/* Something went wrong: roll back page cache changes */
+	xas_lock_irq(&xas);
+	if (nr_none) {
+		mapping->nrpages -= nr_none;
+		shmem_uncharge(mapping->host, nr_none);
+	}
 
-		xas_set(&xas, start);
-		xas_for_each(&xas, page, end - 1) {
-			page = list_first_entry_or_null(&pagelist,
-					struct page, lru);
-			if (!page || xas.xa_index < page->index) {
-				if (!nr_none)
-					break;
-				nr_none--;
-				/* Put holes back where they were */
-				xas_store(&xas, NULL);
-				continue;
-			}
-
-			VM_BUG_ON_PAGE(page->index != xas.xa_index, page);
-
-			/* Unfreeze the page. */
-			list_del(&page->lru);
-			page_ref_unfreeze(page, 2);
-			xas_store(&xas, page);
-			xas_pause(&xas);
-			xas_unlock_irq(&xas);
-			unlock_page(page);
-			putback_lru_page(page);
-			xas_lock_irq(&xas);
-		}
-		VM_BUG_ON(nr_none);
-		/*
-		 * Undo the updates of filemap_nr_thps_inc for non-SHMEM
-		 * file only. This undo is not needed unless failure is
-		 * due to SCAN_COPY_MC.
-		 */
-		if (!is_shmem && result == SCAN_COPY_MC) {
-			filemap_nr_thps_dec(mapping);
-			/*
-			 * Paired with smp_mb() in do_dentry_open() to
-			 * ensure the update to nr_thps is visible.
-			 */
-			smp_mb();
-		}
+	xas_set(&xas, start);
+	xas_for_each(&xas, page, end - 1) {
+		page = list_first_entry_or_null(&pagelist,
+				struct page, lru);
+		if (!page || xas.xa_index < page->index) {
+			if (!nr_none)
+				break;
+			nr_none--;
+			/* Put holes back where they were */
+			xas_store(&xas, NULL);
+			continue;
+		}
+
+		VM_BUG_ON_PAGE(page->index != xas.xa_index, page);
+
+		/* Unfreeze the page. */
+		list_del(&page->lru);
+		page_ref_unfreeze(page, 2);
+		xas_store(&xas, page);
+		xas_pause(&xas);
+		xas_unlock_irq(&xas);
+		unlock_page(page);
+		putback_lru_page(page);
+		xas_lock_irq(&xas);
+	}
+	VM_BUG_ON(nr_none);
+	/*
+	 * Undo the updates of filemap_nr_thps_inc for non-SHMEM
+	 * file only. This undo is not needed unless failure is
+	 * due to SCAN_COPY_MC.
+	 */
+	if (!is_shmem && result == SCAN_COPY_MC) {
+		filemap_nr_thps_dec(mapping);
+		/*
+		 * Paired with smp_mb() in do_dentry_open() to
+		 * ensure the update to nr_thps is visible.
+		 */
+		smp_mb();
+	}
 
-		xas_unlock_irq(&xas);
+	xas_unlock_irq(&xas);
 
-		hpage->mapping = NULL;
-	}
+	hpage->mapping = NULL;
 
-	if (hpage)
-		unlock_page(hpage);
+	unlock_page(hpage);
+	put_page(hpage);
 out:
 	VM_BUG_ON(!list_empty(&pagelist));
-	if (hpage)
-		put_page(hpage);
 
 	trace_mm_khugepaged_collapse_file(mm, hpage, index, is_shmem, addr, file, nr, result);
 	return result;
 }