Commit 345c62d1 authored by Sidhartha Kumar, committed by Andrew Morton

mm/hugetlb: convert move_hugetlb_state() to folios

Clean up unmap_and_move_huge_page() by converting move_hugetlb_state() to
take in folios.

[akpm@linux-foundation.org: fix CONFIG_HUGETLB_PAGE=n build]
Link: https://lkml.kernel.org/r/20221101223059.460937-10-sidhartha.kumar@oracle.com
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Reviewed-by: Muchun Song <songmuchun@bytedance.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Bui Quang Minh <minhquangbui99@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Mina Almasry <almasrymina@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 541b7c7b
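
For context before the diff, the calling convention this patch converts looks roughly like the sketch below. This is an illustration, not part of the commit: unmap_and_move_huge_page() in this tree already holds folio views of its pages (src and dst), obtained once at the boundary with page_folio(), so the folio-taking move_hugetlb_state() can be called without repeated page-to-folio conversions.

	/*
	 * Sketch (illustrative, not from the diff): convert each
	 * struct page once at the boundary, then stay in folio space.
	 */
	struct folio *src = page_folio(hpage);		/* head page -> folio */
	struct folio *dst = page_folio(new_hpage);

	if (rc == MIGRATEPAGE_SUCCESS)
		move_hugetlb_state(src, dst, reason);	/* folio-taking API */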
@@ -187,7 +187,7 @@ int get_hwpoison_huge_page(struct page *page, bool *hugetlb, bool unpoison);
 int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
 				bool *migratable_cleared);
 void putback_active_hugepage(struct page *page);
-void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
+void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason);
 void free_huge_page(struct page *page);
 void hugetlb_fix_reserve_counts(struct inode *inode);
 extern struct mutex *hugetlb_fault_mutex_table;
@@ -407,8 +407,8 @@ static inline void putback_active_hugepage(struct page *page)
 {
 }
 
-static inline void move_hugetlb_state(struct page *oldpage,
-					struct page *newpage, int reason)
+static inline void move_hugetlb_state(struct folio *old_folio,
+					struct folio *new_folio, int reason)
 {
 }
@@ -991,6 +991,11 @@ void hugetlb_unregister_node(struct node *node);
 #else	/* CONFIG_HUGETLB_PAGE */
 struct hstate {};
 
+static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio)
+{
+	return NULL;
+}
+
 static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
 {
 	return NULL;
...
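
The last hunk above is the CONFIG_HUGETLB_PAGE=n build fix from the [akpm] note: mm/migrate.c now calls hugetlb_folio_subpool() even when hugetlb is compiled out, so the header needs a stub that folds to NULL. The idiom, sketched with hedged comments (the enabled-side declaration here is a placeholder for illustration, not the real implementation):

	#ifdef CONFIG_HUGETLB_PAGE
	/* Enabled: returns the subpool recorded in the hugetlb folio. */
	struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio);
	#else
	/* Disabled: fold to NULL so callers' subpool checks compile away. */
	static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio)
	{
		return NULL;
	}
	#endif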
@@ -7324,15 +7324,15 @@ void putback_active_hugepage(struct page *page)
 	put_page(page);
 }
 
-void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason)
+void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason)
 {
-	struct hstate *h = page_hstate(oldpage);
+	struct hstate *h = folio_hstate(old_folio);
 
-	hugetlb_cgroup_migrate(page_folio(oldpage), page_folio(newpage));
-	set_page_owner_migrate_reason(newpage, reason);
+	hugetlb_cgroup_migrate(old_folio, new_folio);
+	set_page_owner_migrate_reason(&new_folio->page, reason);
 
 	/*
-	 * transfer temporary state of the new huge page. This is
+	 * transfer temporary state of the new hugetlb folio. This is
 	 * reverse to other transitions because the newpage is going to
 	 * be final while the old one will be freed so it takes over
 	 * the temporary status.
@@ -7341,12 +7341,14 @@ void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason)
 	 * here as well otherwise the global surplus count will not match
 	 * the per-node's.
 	 */
-	if (HPageTemporary(newpage)) {
-		int old_nid = page_to_nid(oldpage);
-		int new_nid = page_to_nid(newpage);
+	if (folio_test_hugetlb_temporary(new_folio)) {
+		int old_nid = folio_nid(old_folio);
+		int new_nid = folio_nid(new_folio);
+
+		folio_set_hugetlb_temporary(old_folio);
+		folio_clear_hugetlb_temporary(new_folio);
 
-		SetHPageTemporary(oldpage);
-		ClearHPageTemporary(newpage);
 
 		/*
 		 * There is no need to transfer the per-node surplus state
...
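
The temporary-state transfer above runs in reverse of most flag transfers: the destination folio is the one that survives migration, so it must shed the temporary flag while the source folio inherits it, and freeing the source then corrects the surplus accounting on its node. Note also that set_page_owner_migrate_reason() still takes a struct page, so the folio is converted back at that single call site with &new_folio->page. A commented sketch of the same logic (the helper name is illustrative):

	/* Sketch: mirrors the hunk above with the reasoning spelled out. */
	static void transfer_temporary_state(struct folio *old_folio,
					     struct folio *new_folio)
	{
		if (!folio_test_hugetlb_temporary(new_folio))
			return;

		/*
		 * new_folio becomes the long-lived copy: clear its
		 * temporary status and hand it to old_folio, whose
		 * upcoming free fixes up surplus counts on its node.
		 */
		folio_set_hugetlb_temporary(old_folio);
		folio_clear_hugetlb_temporary(new_folio);
	}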
@@ -1298,7 +1298,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 	 * folio_mapping() set, hugetlbfs specific move page routine will not
 	 * be called and we could leak usage counts for subpools.
 	 */
-	if (hugetlb_page_subpool(hpage) && !folio_mapping(src)) {
+	if (hugetlb_folio_subpool(src) && !folio_mapping(src)) {
 		rc = -EBUSY;
 		goto out_unlock;
 	}
@@ -1348,7 +1348,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 	put_anon_vma(anon_vma);
 
 	if (rc == MIGRATEPAGE_SUCCESS) {
-		move_hugetlb_state(src, dst, reason);
+		move_hugetlb_state(src, dst, reason);
 		put_new_page = NULL;
 	}
...
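
The guard in the first mm/migrate.c hunk exists because a hugetlb folio can lose its mapping (for example after a truncate) while still charging a subpool; without folio_mapping() set, hugetlbfs' move-page routine never runs, so completing the migration would leak the subpool usage count. Restated in folio terms with the rationale as comments (a sketch of the same check, not new code):

	if (hugetlb_folio_subpool(src) && !folio_mapping(src)) {
		/*
		 * No mapping -> the hugetlbfs migration callback will
		 * not run -> subpool counts would never be transferred.
		 * Fail with -EBUSY so the caller can retry later.
		 */
		rc = -EBUSY;
		goto out_unlock;
	}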