Commit 03468a0f authored by Matthew Wilcox (Oracle), committed by Andrew Morton

mm/memory-failure: convert hwpoison_user_mappings to take a folio

Pass the folio from the callers, and use it throughout instead of hpage. 
Saves dozens of calls to compound_head().

Link: https://lkml.kernel.org/r/20240412193510.2356957-9-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Miaohe Lin <linmiaohe@huawei.com>
Reviewed-by: Jane Chu <jane.chu@oracle.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Oscar Salvador <osalvador@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 5dba5c35
...@@ -1559,24 +1559,24 @@ static int get_hwpoison_page(struct page *p, unsigned long flags) ...@@ -1559,24 +1559,24 @@ static int get_hwpoison_page(struct page *p, unsigned long flags)
* Do all that is necessary to remove user space mappings. Unmap * Do all that is necessary to remove user space mappings. Unmap
* the pages and send SIGBUS to the processes if the data was dirty. * the pages and send SIGBUS to the processes if the data was dirty.
*/ */
static bool hwpoison_user_mappings(struct page *p, unsigned long pfn, static bool hwpoison_user_mappings(struct folio *folio, struct page *p,
int flags, struct page *hpage) unsigned long pfn, int flags)
{ {
struct folio *folio = page_folio(hpage);
enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_SYNC | TTU_HWPOISON; enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_SYNC | TTU_HWPOISON;
struct address_space *mapping; struct address_space *mapping;
LIST_HEAD(tokill); LIST_HEAD(tokill);
bool unmap_success; bool unmap_success;
int forcekill; int forcekill;
bool mlocked = PageMlocked(hpage); bool mlocked = folio_test_mlocked(folio);
/* /*
* Here we are interested only in user-mapped pages, so skip any * Here we are interested only in user-mapped pages, so skip any
* other types of pages. * other types of pages.
*/ */
if (PageReserved(p) || PageSlab(p) || PageTable(p) || PageOffline(p)) if (folio_test_reserved(folio) || folio_test_slab(folio) ||
folio_test_pgtable(folio) || folio_test_offline(folio))
return true; return true;
if (!(PageLRU(hpage) || PageHuge(p))) if (!(folio_test_lru(folio) || folio_test_hugetlb(folio)))
return true; return true;
/* /*
...@@ -1586,7 +1586,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn, ...@@ -1586,7 +1586,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
if (!page_mapped(p)) if (!page_mapped(p))
return true; return true;
if (PageSwapCache(p)) { if (folio_test_swapcache(folio)) {
pr_err("%#lx: keeping poisoned page in swap cache\n", pfn); pr_err("%#lx: keeping poisoned page in swap cache\n", pfn);
ttu &= ~TTU_HWPOISON; ttu &= ~TTU_HWPOISON;
} }
...@@ -1597,11 +1597,11 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn, ...@@ -1597,11 +1597,11 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
* XXX: the dirty test could be racy: set_page_dirty() may not always * XXX: the dirty test could be racy: set_page_dirty() may not always
* be called inside page lock (it's recommended but not enforced). * be called inside page lock (it's recommended but not enforced).
*/ */
mapping = page_mapping(hpage); mapping = folio_mapping(folio);
if (!(flags & MF_MUST_KILL) && !PageDirty(hpage) && mapping && if (!(flags & MF_MUST_KILL) && !folio_test_dirty(folio) && mapping &&
mapping_can_writeback(mapping)) { mapping_can_writeback(mapping)) {
if (page_mkclean(hpage)) { if (folio_mkclean(folio)) {
SetPageDirty(hpage); folio_set_dirty(folio);
} else { } else {
ttu &= ~TTU_HWPOISON; ttu &= ~TTU_HWPOISON;
pr_info("%#lx: corrupted page was clean: dropped without side effects\n", pr_info("%#lx: corrupted page was clean: dropped without side effects\n",
...@@ -1616,7 +1616,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn, ...@@ -1616,7 +1616,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
*/ */
collect_procs(folio, p, &tokill, flags & MF_ACTION_REQUIRED); collect_procs(folio, p, &tokill, flags & MF_ACTION_REQUIRED);
if (PageHuge(hpage) && !PageAnon(hpage)) { if (folio_test_hugetlb(folio) && !folio_test_anon(folio)) {
/* /*
* For hugetlb pages in shared mappings, try_to_unmap * For hugetlb pages in shared mappings, try_to_unmap
* could potentially call huge_pmd_unshare. Because of * could potentially call huge_pmd_unshare. Because of
...@@ -1656,7 +1656,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn, ...@@ -1656,7 +1656,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
* use a more force-full uncatchable kill to prevent * use a more force-full uncatchable kill to prevent
* any accesses to the poisoned memory. * any accesses to the poisoned memory.
*/ */
forcekill = PageDirty(hpage) || (flags & MF_MUST_KILL) || forcekill = folio_test_dirty(folio) || (flags & MF_MUST_KILL) ||
!unmap_success; !unmap_success;
kill_procs(&tokill, forcekill, !unmap_success, pfn, flags); kill_procs(&tokill, forcekill, !unmap_success, pfn, flags);
...@@ -2100,7 +2100,7 @@ static int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb ...@@ -2100,7 +2100,7 @@ static int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb
page_flags = folio->flags; page_flags = folio->flags;
if (!hwpoison_user_mappings(p, pfn, flags, &folio->page)) { if (!hwpoison_user_mappings(folio, p, pfn, flags)) {
folio_unlock(folio); folio_unlock(folio);
return action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED); return action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
} }
...@@ -2367,7 +2367,7 @@ int memory_failure(unsigned long pfn, int flags) ...@@ -2367,7 +2367,7 @@ int memory_failure(unsigned long pfn, int flags)
* Now take care of user space mappings. * Now take care of user space mappings.
* Abort on fail: __filemap_remove_folio() assumes unmapped page. * Abort on fail: __filemap_remove_folio() assumes unmapped page.
*/ */
if (!hwpoison_user_mappings(p, pfn, flags, p)) { if (!hwpoison_user_mappings(folio, p, pfn, flags)) {
res = action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED); res = action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
goto unlock_page; goto unlock_page;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment