Commit 8dc4a8f1 authored by Matthew Wilcox (Oracle), committed by Andrew Morton

mm: convert free_transhuge_folio() to folio_undo_large_rmappable()

Indirect calls are expensive, thanks to Spectre.  Test for
TRANSHUGE_PAGE_DTOR and destroy the folio appropriately.  Move the
free_compound_page() call into destroy_large_folio() to simplify later
patches.

Link: https://lkml.kernel.org/r/20230816151201.3655946-5-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Cc: Yanteng Si <siyanteng@loongson.cn>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 454a00c4
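As a side note on the motivation: under retpoline-style Spectre mitigations every indirect branch is costly, so dispatching the free path through the compound_page_dtors[] function-pointer table pays that cost on every large-folio free. The stand-alone C sketch below is not kernel code; folio_stub, destroy_indirect() and destroy_direct() are invented names used only to contrast table-driven dispatch with the explicit-test-plus-direct-call shape this patch gives destroy_large_folio().

#include <stdio.h>

/* Simplified stand-ins for the kernel's compound destructor ids. */
enum dtor_id { COMPOUND_DTOR, TRANSHUGE_DTOR, NR_DTORS };

struct folio_stub { enum dtor_id dtor; };

static void free_compound(struct folio_stub *f)  { (void)f; puts("free_compound"); }
static void free_transhuge(struct folio_stub *f) { (void)f; puts("undo rmappable, then free_compound"); }

/* Table-driven dispatch: one indirect call per free. */
static void (*const dtors[NR_DTORS])(struct folio_stub *) = {
	[COMPOUND_DTOR]  = free_compound,
	[TRANSHUGE_DTOR] = free_transhuge,
};

static void destroy_indirect(struct folio_stub *f)
{
	dtors[f->dtor](f);		/* indirect call: retpoline-expensive */
}

/* The shape the patch moves toward: test the id, call the handler directly. */
static void destroy_direct(struct folio_stub *f)
{
	if (f->dtor == TRANSHUGE_DTOR) {
		free_transhuge(f);	/* direct call, no indirect branch */
		return;
	}
	dtors[f->dtor](f);		/* remaining cases still go through the table */
}

int main(void)
{
	struct folio_stub thp = { .dtor = TRANSHUGE_DTOR };

	destroy_indirect(&thp);
	destroy_direct(&thp);
	return 0;
}

In destroy_direct() the tested case becomes an ordinary direct call; only the untested remainder still goes through the function-pointer table.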
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -141,8 +141,6 @@ unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
 		unsigned long len, unsigned long pgoff, unsigned long flags);
 
 void prep_transhuge_page(struct page *page);
-void free_transhuge_page(struct page *page);
-
 bool can_split_folio(struct folio *folio, int *pextra_pins);
 int split_huge_page_to_list(struct page *page, struct list_head *list);
 static inline int split_huge_page(struct page *page)
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1253,9 +1253,7 @@ enum compound_dtor_id {
 #ifdef CONFIG_HUGETLB_PAGE
 	HUGETLB_PAGE_DTOR,
 #endif
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	TRANSHUGE_PAGE_DTOR,
-#endif
 	NR_COMPOUND_DTORS,
 };
 
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2776,10 +2776,9 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 	return ret;
 }
 
-void free_transhuge_page(struct page *page)
+void folio_undo_large_rmappable(struct folio *folio)
 {
-	struct folio *folio = (struct folio *)page;
-	struct deferred_split *ds_queue = get_deferred_split_queue(folio);
+	struct deferred_split *ds_queue;
 	unsigned long flags;
 
 	/*
@@ -2787,15 +2786,16 @@ void free_transhuge_page(struct page *page)
 	 * deferred_list. If folio is not in deferred_list, it's safe
 	 * to check without acquiring the split_queue_lock.
 	 */
-	if (data_race(!list_empty(&folio->_deferred_list))) {
-		spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
-		if (!list_empty(&folio->_deferred_list)) {
-			ds_queue->split_queue_len--;
-			list_del(&folio->_deferred_list);
-		}
-		spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
+	if (data_race(list_empty(&folio->_deferred_list)))
+		return;
+
+	ds_queue = get_deferred_split_queue(folio);
+	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
+	if (!list_empty(&folio->_deferred_list)) {
+		ds_queue->split_queue_len--;
+		list_del(&folio->_deferred_list);
 	}
-	free_compound_page(page);
+	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
 }
 
 void deferred_split_folio(struct folio *folio)
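The reworked folio_undo_large_rmappable() above keeps the existing pattern of an unlocked emptiness check (wrapped in data_race() for KCSAN) followed by a locked recheck before unlinking; it only turns the check into an early return. Below is a minimal user-space sketch of that check-then-lock-then-recheck shape, using a pthread mutex in place of the split-queue spinlock; struct node, undo_queued() and the list helpers are invented names, not kernel code.

#include <pthread.h>
#include <stdbool.h>

struct node { struct node *next, *prev; };

static struct node queue = { &queue, &queue };	/* empty circular list */
static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static long queue_len;

static bool list_empty(const struct node *n) { return n->next == n; }

static void list_add(struct node *n, struct node *head)
{
	n->next = head->next;
	n->prev = head;
	head->next->prev = n;
	head->next = n;
}

static void list_del(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	n->next = n->prev = n;	/* back to "not queued" */
}

void undo_queued(struct node *n)
{
	/* Unlocked peek: if the node was never queued, skip the lock. */
	if (list_empty(n))
		return;

	pthread_mutex_lock(&queue_lock);
	if (!list_empty(n)) {	/* recheck now that the lock is held */
		queue_len--;
		list_del(n);
	}
	pthread_mutex_unlock(&queue_lock);
}

int main(void)
{
	struct node n = { &n, &n };	/* starts unqueued */

	undo_queued(&n);		/* early return, lock never taken */

	pthread_mutex_lock(&queue_lock);
	list_add(&n, &queue);
	queue_len++;
	pthread_mutex_unlock(&queue_lock);

	undo_queued(&n);		/* locks, rechecks, unlinks */
	return 0;
}

The recheck under the lock is what makes the unlocked peek safe: a racing remover may have already emptied the list between the two tests.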
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -413,6 +413,8 @@ static inline void folio_set_order(struct folio *folio, unsigned int order)
 #endif
 }
 
+void folio_undo_large_rmappable(struct folio *folio);
+
 static inline void prep_compound_head(struct page *page, unsigned int order)
 {
 	struct folio *folio = (struct folio *)page;
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -287,9 +287,6 @@ const char * const migratetype_names[MIGRATE_TYPES] = {
 static compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS] = {
 	[NULL_COMPOUND_DTOR] = NULL,
 	[COMPOUND_PAGE_DTOR] = free_compound_page,
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	[TRANSHUGE_PAGE_DTOR] = free_transhuge_page,
-#endif
 };
 
 int min_free_kbytes = 1024;
@@ -614,6 +611,12 @@ void destroy_large_folio(struct folio *folio)
 		return;
 	}
 
+	if (folio_test_transhuge(folio) && dtor == TRANSHUGE_PAGE_DTOR) {
+		folio_undo_large_rmappable(folio);
+		free_compound_page(&folio->page);
+		return;
+	}
+
 	VM_BUG_ON_FOLIO(dtor >= NR_COMPOUND_DTORS, folio);
 	compound_page_dtors[dtor](&folio->page);
 }