Commit 6b9a217e authored by Oscar Salvador, committed by Linus Torvalds

mm,hwpoison: refactor soft_offline_huge_page and __soft_offline_page

Merging soft_offline_huge_page and __soft_offline_page lets us get rid of
a fair amount of duplicated code and makes the code much easier to follow.

Now, __soft_offline_page will handle both normal and hugetlb pages; a
simplified sketch of the resulting flow follows the commit metadata below.
Signed-off-by: Oscar Salvador <osalvador@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Naoya Horiguchi <naoya.horiguchi@nec.com>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Aristeu Rozanski <aris@ruivo.org>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Dmitry Yakunin <zeil@yandex-team.ru>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Oscar Salvador <osalvador@suse.com>
Cc: Qian Cai <cai@lca.pw>
Cc: Tony Luck <tony.luck@intel.com>
Link: https://lkml.kernel.org/r/20200922135650.1634-11-osalvador@suse.de
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 79f5f8fa
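
For orientation before the diff: the sketch below is a minimal, compilable
userspace C rendering of the control flow the patch ends up with. Every type
and helper in it is a stub invented for illustration (struct page, the *_stub
functions); only the branching mirrors the patched code.

	/* Hedged sketch: stubs only, not kernel code. */
	#include <stdbool.h>
	#include <stdio.h>

	struct page { int dummy; };	/* stand-in for the kernel's struct page */

	/* Stubs standing in for kernel helpers; bodies are placeholders. */
	static bool page_is_huge_stub(struct page *p) { (void)p; return false; }
	static bool isolate_page_stub(struct page *p) { (void)p; return true; }
	static int migrate_pages_stub(struct page *p) { (void)p; return 0; }
	static bool page_handle_poison_stub(struct page *p, bool hugepage_or_freepage,
					    bool release)
	{
		(void)p; (void)hugepage_or_freepage; (void)release;
		return true;
	}

	/* After the patch, one path serves both normal and hugetlb pages. */
	static int soft_offline_sketch(struct page *page)
	{
		bool huge = page_is_huge_stub(page);

		if (!isolate_page_stub(page))
			return -1;	/* -EBUSY in the kernel */
		if (migrate_pages_stub(page))
			return -5;	/* -EIO in the kernel */
		/* Poison only succeeds if the page can leave the allocator. */
		if (!page_handle_poison_stub(page, huge, /*release=*/!huge))
			return -1;	/* -EBUSY in the kernel */
		return 0;
	}

	int main(void)
	{
		struct page p = { 0 };

		printf("soft offline sketch: %d\n", soft_offline_sketch(&p));
		return 0;
	}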
@@ -65,13 +65,31 @@ int sysctl_memory_failure_recovery __read_mostly = 1;
 
 atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
 
-static void page_handle_poison(struct page *page, bool release)
+static bool page_handle_poison(struct page *page, bool hugepage_or_freepage, bool release)
 {
+	if (hugepage_or_freepage) {
+		/*
+		 * Doing this check for free pages is also fine since dissolve_free_huge_page
+		 * returns 0 for non-hugetlb pages as well.
+		 */
+		if (dissolve_free_huge_page(page) || !take_page_off_buddy(page))
+			/*
+			 * We could fail to take the target page off the buddy
+			 * list, for example due to a racy page allocation, but
+			 * that's acceptable because a soft-offlined page is not
+			 * broken and if someone really wants to use it, they
+			 * should take it.
+			 */
+			return false;
+	}
+
 	SetPageHWPoison(page);
 	if (release)
 		put_page(page);
 	page_ref_inc(page);
 	num_poisoned_pages_inc();
+
+	return true;
 }
 
 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
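
A note on the changed contract above: page_handle_poison() now reports
failure, and for hugetlb or free pages a false return (the dissolve or
take-off-buddy step did not succeed) must be turned into -EBUSY by the
caller, as the later hunks do. Below is a small compilable sketch of that
mapping; the stub body is invented, and only the signature and the
bool-to-errno translation follow the patch.

	/* Hedged sketch: the stub body below is invented; see the hunk above
	 * for the real implementation.
	 */
	#include <errno.h>
	#include <stdbool.h>

	struct page { int dummy; };	/* stand-in for the kernel's struct page */

	static bool page_handle_poison(struct page *page, bool hugepage_or_freepage,
				       bool release)
	{
		(void)page; (void)hugepage_or_freepage; (void)release;
		return true;	/* the kernel version can return false here */
	}

	/* Mirrors the patched soft_offline_free_page(): false becomes -EBUSY. */
	static int soft_offline_free_page_sketch(struct page *page)
	{
		return page_handle_poison(page, true, false) ? 0 : -EBUSY;
	}

	int main(void)
	{
		struct page p = { 0 };

		return soft_offline_free_page_sketch(&p) ? 1 : 0;
	}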
@@ -1725,63 +1743,51 @@ static int get_any_page(struct page *page, unsigned long pfn, int flags)
 	return ret;
 }
 
-static int soft_offline_huge_page(struct page *page, int flags)
+static bool isolate_page(struct page *page, struct list_head *pagelist)
 {
-	int ret;
-	unsigned long pfn = page_to_pfn(page);
-	struct page *hpage = compound_head(page);
-	LIST_HEAD(pagelist);
+	bool isolated = false;
+	bool lru = PageLRU(page);
 
-	/*
-	 * This double-check of PageHWPoison is to avoid the race with
-	 * memory_failure(). See also comment in __soft_offline_page().
-	 */
-	lock_page(hpage);
-	if (PageHWPoison(hpage)) {
-		unlock_page(hpage);
-		put_page(hpage);
-		pr_info("soft offline: %#lx hugepage already poisoned\n", pfn);
-		return -EBUSY;
+	if (PageHuge(page)) {
+		isolated = isolate_huge_page(page, pagelist);
+	} else {
+		if (lru)
+			isolated = !isolate_lru_page(page);
+		else
+			isolated = !isolate_movable_page(page, ISOLATE_UNEVICTABLE);
+
+		if (isolated)
+			list_add(&page->lru, pagelist);
 	}
-	unlock_page(hpage);
 
-	ret = isolate_huge_page(hpage, &pagelist);
+	if (isolated && lru)
+		inc_node_page_state(page, NR_ISOLATED_ANON +
+				    page_is_file_lru(page));
+
 	/*
-	 * get_any_page() and isolate_huge_page() takes a refcount each,
-	 * so need to drop one here.
+	 * If we succeed in isolating the page, we grabbed another refcount on
+	 * the page, so we can safely drop the one we got from get_any_page().
+	 * If we failed to isolate the page, it means that we cannot go further
+	 * and we will return an error, so drop the reference we got from
+	 * get_any_page() as well.
 	 */
-	put_page(hpage);
-	if (!ret) {
-		pr_info("soft offline: %#lx hugepage failed to isolate\n", pfn);
-		return -EBUSY;
-	}
-
-	ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL,
-				MIGRATE_SYNC, MR_MEMORY_FAILURE);
-	if (ret) {
-		pr_info("soft offline: %#lx: hugepage migration failed %d, type %lx (%pGp)\n",
-			pfn, ret, page->flags, &page->flags);
-		if (!list_empty(&pagelist))
-			putback_movable_pages(&pagelist);
-		if (ret > 0)
-			ret = -EIO;
-	} else {
-		/*
-		 * We set PG_hwpoison only when we were able to take the page
-		 * off the buddy.
-		 */
-		if (!dissolve_free_huge_page(page) && take_page_off_buddy(page))
-			page_handle_poison(page, false);
-		else
-			ret = -EBUSY;
-	}
-	return ret;
+	put_page(page);
+	return isolated;
 }
 
-static int __soft_offline_page(struct page *page, int flags)
+/*
+ * __soft_offline_page handles hugetlb-pages and non-hugetlb pages.
+ * If the page is a non-dirty unmapped page-cache page, it simply invalidates.
+ * If the page is mapped, it migrates the contents over.
+ */
+static int __soft_offline_page(struct page *page)
 {
-	int ret;
+	int ret = 0;
 	unsigned long pfn = page_to_pfn(page);
+	struct page *hpage = compound_head(page);
+	char const *msg_page[] = {"page", "hugepage"};
+	bool huge = PageHuge(page);
+	LIST_HEAD(pagelist);
 
 	/*
 	 * Check PageHWPoison again inside page lock because PageHWPoison
@@ -1790,98 +1796,74 @@ static int __soft_offline_page(struct page *page, int flags)
 	 * so there's no race between soft_offline_page() and memory_failure().
 	 */
 	lock_page(page);
-	wait_on_page_writeback(page);
+	if (!PageHuge(page))
+		wait_on_page_writeback(page);
 	if (PageHWPoison(page)) {
 		unlock_page(page);
 		put_page(page);
 		pr_info("soft offline: %#lx page already poisoned\n", pfn);
 		return -EBUSY;
 	}
-	/*
-	 * Try to invalidate first. This should work for
-	 * non dirty unmapped page cache pages.
-	 */
-	ret = invalidate_inode_page(page);
+
+	if (!PageHuge(page))
+		/*
+		 * Try to invalidate first. This should work for
+		 * non dirty unmapped page cache pages.
+		 */
+		ret = invalidate_inode_page(page);
 	unlock_page(page);
+
 	/*
 	 * RED-PEN would be better to keep it isolated here, but we
 	 * would need to fix isolation locking first.
 	 */
-	if (ret == 1) {
+	if (ret) {
 		pr_info("soft_offline: %#lx: invalidated\n", pfn);
-		page_handle_poison(page, true);
+		page_handle_poison(page, false, true);
 		return 0;
 	}
 
-	/*
-	 * Simple invalidation didn't work.
-	 * Try to migrate to a new page instead. migrate.c
-	 * handles a large number of cases for us.
-	 */
-	if (PageLRU(page))
-		ret = isolate_lru_page(page);
-	else
-		ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
-	/*
-	 * Drop page reference which is came from get_any_page()
-	 * successful isolate_lru_page() already took another one.
-	 */
-	put_page(page);
-	if (!ret) {
-		LIST_HEAD(pagelist);
-		/*
-		 * After isolated lru page, the PageLRU will be cleared,
-		 * so use !__PageMovable instead for LRU page's mapping
-		 * cannot have PAGE_MAPPING_MOVABLE.
-		 */
-		if (!__PageMovable(page))
-			inc_node_page_state(page, NR_ISOLATED_ANON +
-						page_is_file_lru(page));
-		list_add(&page->lru, &pagelist);
+	if (isolate_page(hpage, &pagelist)) {
 		ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL,
-					MIGRATE_SYNC, MR_MEMORY_FAILURE);
+			MIGRATE_SYNC, MR_MEMORY_FAILURE);
 		if (!ret) {
-			page_handle_poison(page, true);
+			bool release = !huge;
+
+			if (!page_handle_poison(page, huge, release))
+				ret = -EBUSY;
 		} else {
 			if (!list_empty(&pagelist))
 				putback_movable_pages(&pagelist);
 
-			pr_info("soft offline: %#lx: migration failed %d, type %lx (%pGp)\n",
-				pfn, ret, page->flags, &page->flags);
+			pr_info("soft offline: %#lx: %s migration failed %d, type %lx (%pGp)\n",
+				pfn, msg_page[huge], ret, page->flags, &page->flags);
 			if (ret > 0)
 				ret = -EIO;
 		}
 	} else {
-		pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx (%pGp)\n",
-			pfn, ret, page_count(page), page->flags, &page->flags);
+		pr_info("soft offline: %#lx: %s isolation failed: %d, page count %d, type %lx (%pGp)\n",
+			pfn, msg_page[huge], ret, page_count(page), page->flags, &page->flags);
+		ret = -EBUSY;
 	}
 	return ret;
 }
 
-static int soft_offline_in_use_page(struct page *page, int flags)
+static int soft_offline_in_use_page(struct page *page)
 {
-	int ret;
 	struct page *hpage = compound_head(page);
 
 	if (!PageHuge(page) && PageTransHuge(hpage))
 		if (try_to_split_thp_page(page, "soft offline") < 0)
 			return -EBUSY;
-
-	if (PageHuge(page))
-		ret = soft_offline_huge_page(page, flags);
-	else
-		ret = __soft_offline_page(page, flags);
-	return ret;
+	return __soft_offline_page(page);
 }
 
 static int soft_offline_free_page(struct page *page)
 {
-	int rc = -EBUSY;
+	int rc = 0;
 
-	if (!dissolve_free_huge_page(page) && take_page_off_buddy(page)) {
-		page_handle_poison(page, false);
-		rc = 0;
-	}
+	if (!page_handle_poison(page, true, false))
+		rc = -EBUSY;
 
 	return rc;
 }
@@ -1932,7 +1914,7 @@ int soft_offline_page(unsigned long pfn, int flags)
 	put_online_mems();
 
 	if (ret > 0)
-		ret = soft_offline_in_use_page(page, flags);
+		ret = soft_offline_in_use_page(page);
 	else if (ret == 0)
 		ret = soft_offline_free_page(page);