Commit 2ff6cece authored by Sidhartha Kumar's avatar Sidhartha Kumar Committed by Andrew Morton

mm/memory-failure: convert hugetlb_clear_page_hwpoison to folios

Change hugetlb_clear_page_hwpoison() to folio_clear_hugetlb_hwpoison() by
changing the function to take in a folio.  This converts one use of
ClearPageHWPoison and HPageRawHwpUnreliable to their folio equivalents.

Link: https://lkml.kernel.org/r/20230112204608.80136-4-sidhartha.kumar@oracle.com
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Acked-by: Naoya Horiguchi <naoya.horiguchi@nec.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent bc1cfde1
@@ -878,9 +878,9 @@ extern int dissolve_free_huge_pages(unsigned long start_pfn,
 				    unsigned long end_pfn);
 #ifdef CONFIG_MEMORY_FAILURE
-extern void hugetlb_clear_page_hwpoison(struct page *hpage);
+extern void folio_clear_hugetlb_hwpoison(struct folio *folio);
 #else
-static inline void hugetlb_clear_page_hwpoison(struct page *hpage)
+static inline void folio_clear_hugetlb_hwpoison(struct folio *folio)
 {
 }
 #endif
......
@@ -1731,7 +1731,7 @@ static void __update_and_free_page(struct hstate *h, struct page *page)
	 * which makes any healthy subpages reusable.
	 */
	if (unlikely(folio_test_hwpoison(folio)))
-		hugetlb_clear_page_hwpoison(&folio->page);
+		folio_clear_hugetlb_hwpoison(folio);
	for (i = 0; i < pages_per_huge_page(h); i++) {
		subpage = folio_page(folio, i);
......
@@ -1785,12 +1785,12 @@ static unsigned long free_raw_hwp_pages(struct page *hpage, bool move_flag)
	return __free_raw_hwp_pages(hpage, move_flag);
 }

-void hugetlb_clear_page_hwpoison(struct page *hpage)
+void folio_clear_hugetlb_hwpoison(struct folio *folio)
 {
-	if (HPageRawHwpUnreliable(hpage))
+	if (folio_test_hugetlb_raw_hwp_unreliable(folio))
		return;
-	ClearPageHWPoison(hpage);
-	free_raw_hwp_pages(hpage, true);
+	folio_clear_hwpoison(folio);
+	free_raw_hwp_pages(&folio->page, true);
 }

 /*
@@ -1889,7 +1889,7 @@ static int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb)
	folio_lock(folio);
	if (hwpoison_filter(p)) {
-		hugetlb_clear_page_hwpoison(&folio->page);
+		folio_clear_hugetlb_hwpoison(folio);
		if (migratable_cleared)
			folio_set_hugetlb_migratable(folio);
		folio_unlock(folio);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment