Commit 42a346b4 authored by Matthew Wilcox (Oracle), committed by Andrew Morton

hugetlb: remove mention of destructors

We no longer have destructors or dtors, merely a page flag (technically a
page type flag, but that's an implementation detail).  Remove
__clear_hugetlb_destructor, fix up comments and the occasional variable
name.

Link: https://lkml.kernel.org/r/20240321142448.1645400-10-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Oscar Salvador <osalvador@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 8f790d0c
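
[Editor's illustrative note] The commit message's point is that a hugetlb folio is now identified by a page type flag rather than by a compound-page destructor. Below is a standalone, simplified sketch of how such a type flag can be tested, set and cleared. It is loosely modelled on the kernel's include/linux/page-flags.h, but the constant values and helper bodies here are illustrative assumptions, not the kernel's exact definitions.

/*
 * Userspace sketch of a "page type" flag, loosely modelled on
 * include/linux/page-flags.h.  Constants and helpers are illustrative,
 * not the kernel's real ones.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/*
 * page_type starts as all-ones; a type is recorded by *clearing* its bit
 * while the high "base" bits stay set, so ordinary values stored in the
 * same word can never be mistaken for a typed page.
 */
#define PAGE_TYPE_BASE 0xf0000000u
#define PG_HUGETLB     0x00000800u   /* illustrative bit value */

struct folio {                       /* drastically simplified */
        unsigned int page_type;
};

static bool folio_test_hugetlb(const struct folio *folio)
{
        return (folio->page_type & (PAGE_TYPE_BASE | PG_HUGETLB)) ==
               PAGE_TYPE_BASE;
}

static void __folio_set_hugetlb(struct folio *folio)
{
        folio->page_type &= ~PG_HUGETLB;   /* clear the bit to set the type */
}

static void __folio_clear_hugetlb(struct folio *folio)
{
        folio->page_type |= PG_HUGETLB;    /* restore the bit to clear it */
}

int main(void)
{
        struct folio f = { .page_type = 0xffffffffu };  /* "no type" state */

        assert(!folio_test_hugetlb(&f));
        __folio_set_hugetlb(&f);
        assert(folio_test_hugetlb(&f));
        __folio_clear_hugetlb(&f);
        assert(!folio_test_hugetlb(&f));
        puts("hugetlb page type flag sketch OK");
        return 0;
}

In the real kernel the inverted encoding is what lets the same storage double as a mapcount for untyped pages; the sketch only shows the flag arithmetic the diff below relies on.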
@@ -1619,19 +1619,11 @@ static inline void destroy_compound_gigantic_folio(struct folio *folio,
                                                 unsigned int order) { }
 #endif
 
-static inline void __clear_hugetlb_destructor(struct hstate *h,
-                                                struct folio *folio)
-{
-        lockdep_assert_held(&hugetlb_lock);
-
-        __folio_clear_hugetlb(folio);
-}
-
 /*
  * Remove hugetlb folio from lists.
- * If vmemmap exists for the folio, update dtor so that the folio appears
- * as just a compound page. Otherwise, wait until after allocating vmemmap
- * to update dtor.
+ * If vmemmap exists for the folio, clear the hugetlb flag so that the
+ * folio appears as just a compound page. Otherwise, wait until after
+ * allocating vmemmap to clear the flag.
  *
  * A reference is held on the folio, except in the case of demote.
  *
@@ -1662,12 +1654,12 @@ static void __remove_hugetlb_folio(struct hstate *h, struct folio *folio,
         }
 
         /*
-         * We can only clear the hugetlb destructor after allocating vmemmap
+         * We can only clear the hugetlb flag after allocating vmemmap
          * pages. Otherwise, someone (memory error handling) may try to write
          * to tail struct pages.
          */
         if (!folio_test_hugetlb_vmemmap_optimized(folio))
-                __clear_hugetlb_destructor(h, folio);
+                __folio_clear_hugetlb(folio);
 
         /*
          * In the case of demote we do not ref count the page as it will soon
@@ -1741,7 +1733,7 @@ static void add_hugetlb_folio(struct hstate *h, struct folio *folio,
 static void __update_and_free_hugetlb_folio(struct hstate *h,
                                                 struct folio *folio)
 {
-        bool clear_dtor = folio_test_hugetlb_vmemmap_optimized(folio);
+        bool clear_flag = folio_test_hugetlb_vmemmap_optimized(folio);
 
         if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
                 return;
@@ -1754,11 +1746,11 @@ static void __update_and_free_hugetlb_folio(struct hstate *h,
                 return;
 
         /*
-         * If folio is not vmemmap optimized (!clear_dtor), then the folio
+         * If folio is not vmemmap optimized (!clear_flag), then the folio
          * is no longer identified as a hugetlb page. hugetlb_vmemmap_restore_folio
          * can only be passed hugetlb pages and will BUG otherwise.
          */
-        if (clear_dtor && hugetlb_vmemmap_restore_folio(h, folio)) {
+        if (clear_flag && hugetlb_vmemmap_restore_folio(h, folio)) {
                 spin_lock_irq(&hugetlb_lock);
                 /*
                  * If we cannot allocate vmemmap pages, just refuse to free the
@@ -1779,11 +1771,11 @@ static void __update_and_free_hugetlb_folio(struct hstate *h,
 
         /*
          * If vmemmap pages were allocated above, then we need to clear the
-         * hugetlb destructor under the hugetlb lock.
+         * hugetlb flag under the hugetlb lock.
          */
         if (folio_test_hugetlb(folio)) {
                 spin_lock_irq(&hugetlb_lock);
-                __clear_hugetlb_destructor(h, folio);
+                __folio_clear_hugetlb(folio);
                 spin_unlock_irq(&hugetlb_lock);
         }
 
@@ -1885,7 +1877,7 @@ static void bulk_vmemmap_restore_error(struct hstate *h,
                 list_for_each_entry_safe(folio, t_folio, non_hvo_folios, lru) {
                         list_del(&folio->lru);
                         spin_lock_irq(&hugetlb_lock);
-                        __clear_hugetlb_destructor(h, folio);
+                        __folio_clear_hugetlb(folio);
                         spin_unlock_irq(&hugetlb_lock);
                         update_and_free_hugetlb_folio(h, folio, false);
                         cond_resched();
@@ -1910,7 +1902,7 @@ static void bulk_vmemmap_restore_error(struct hstate *h,
                         } else {
                                 list_del(&folio->lru);
                                 spin_lock_irq(&hugetlb_lock);
-                                __clear_hugetlb_destructor(h, folio);
+                                __folio_clear_hugetlb(folio);
                                 spin_unlock_irq(&hugetlb_lock);
                                 update_and_free_hugetlb_folio(h, folio, false);
                                 cond_resched();
@@ -1943,14 +1935,14 @@ static void update_and_free_pages_bulk(struct hstate *h,
          * should only be pages on the non_hvo_folios list.
          * Do note that the non_hvo_folios list could be empty.
          * Without HVO enabled, ret will be 0 and there is no need to call
-         * __clear_hugetlb_destructor as this was done previously.
+         * __folio_clear_hugetlb as this was done previously.
          */
         VM_WARN_ON(!list_empty(folio_list));
         VM_WARN_ON(ret < 0);
         if (!list_empty(&non_hvo_folios) && ret) {
                 spin_lock_irq(&hugetlb_lock);
                 list_for_each_entry(folio, &non_hvo_folios, lru)
-                        __clear_hugetlb_destructor(h, folio);
+                        __folio_clear_hugetlb(folio);
                 spin_unlock_irq(&hugetlb_lock);
         }
 
@@ -1975,7 +1967,7 @@ void free_huge_folio(struct folio *folio)
 {
         /*
          * Can't pass hstate in here because it is called from the
-         * compound page destructor.
+         * generic mm code.
          */
         struct hstate *h = folio_hstate(folio);
         int nid = folio_nid(folio);
@@ -2125,7 +2117,7 @@ static bool __prep_compound_gigantic_folio(struct folio *folio,
                 set_compound_head(p, &folio->page);
         }
         __folio_set_head(folio);
-        /* we rely on prep_new_hugetlb_folio to set the destructor */
+        /* we rely on prep_new_hugetlb_folio to set the hugetlb flag */
         folio_set_order(folio, order);
         atomic_set(&folio->_entire_mapcount, -1);
         atomic_set(&folio->_nr_pages_mapped, 0);
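[Editor's illustrative note] The free_huge_folio() hunk above changes "compound page destructor" to "generic mm code": freeing no longer dispatches through a per-compound-page destructor, the generic path can simply test the hugetlb flag. The sketch below shows that dispatch idea using the simplified model from the first sketch; free_large_folio() and both free helpers are hypothetical stand-ins for illustration, not the kernel's actual free path.

/* Standalone sketch: flag-based dispatch instead of a destructor lookup. */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_TYPE_BASE 0xf0000000u
#define PG_HUGETLB     0x00000800u   /* illustrative, as in the first sketch */

struct folio {
        unsigned int page_type;      /* drastically simplified */
};

static bool folio_test_hugetlb(const struct folio *folio)
{
        return (folio->page_type & (PAGE_TYPE_BASE | PG_HUGETLB)) ==
               PAGE_TYPE_BASE;
}

static void free_huge_folio(struct folio *folio)
{
        /* stand-in for mm/hugetlb.c:free_huge_folio() */
        printf("hugetlb folio %p returned to its hstate\n", (void *)folio);
}

static void free_normal_folio(struct folio *folio)
{
        /* stand-in for the ordinary compound-page freeing path */
        printf("folio %p freed to the page allocator\n", (void *)folio);
}

/* Hypothetical generic entry point: a flag test, no destructor table. */
static void free_large_folio(struct folio *folio)
{
        if (folio_test_hugetlb(folio))
                free_huge_folio(folio);
        else
                free_normal_folio(folio);
}

int main(void)
{
        struct folio hugetlb = { .page_type = 0xffffffffu & ~PG_HUGETLB };
        struct folio plain   = { .page_type = 0xffffffffu };

        free_large_folio(&hugetlb);
        free_large_folio(&plain);
        return 0;
}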