Commit b65d4adb authored by Muchun Song, committed by Linus Torvalds

mm: hugetlb: defer freeing of HugeTLB pages

In the subsequent patch, we will allocate the vmemmap pages when freeing a
HugeTLB page.  But update_and_free_page() can be called from any context,
so we cannot use GFP_KERNEL there to allocate the vmemmap pages.  Instead,
defer the actual freeing to a kworker so that the vmemmap pages do not have
to be allocated with GFP_ATOMIC.

__update_and_free_page() is where the call that allocates the vmemmap pages
will be inserted.
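
For readers unfamiliar with the pattern, the following is a minimal sketch of
the idea (the example_* names are illustrative only and are not part of this
patch): frees requested from atomic context are pushed onto a lockless list,
and a workqueue item later processes them in a context where sleeping
allocations such as GFP_KERNEL are allowed.

#include <linux/llist.h>
#include <linux/workqueue.h>
#include <linux/sched.h>

static LLIST_HEAD(example_freelist);

/*
 * Hypothetical helper: free one deferred object.  Runs in process
 * context, so GFP_KERNEL allocations would be safe here.
 */
static void example_free_one(struct llist_node *node)
{
}

static void example_workfn(struct work_struct *work)
{
	/* Grab the whole list atomically; new entries go onto a fresh list. */
	struct llist_node *node = llist_del_all(&example_freelist);

	while (node) {
		struct llist_node *next = node->next;

		example_free_one(node);
		node = next;
		cond_resched();
	}
}
static DECLARE_WORK(example_work, example_workfn);

/* May be called from any context, including with IRQs disabled. */
static void example_defer_free(struct llist_node *entry)
{
	/*
	 * llist_add() returns true only if the list was previously empty,
	 * so the work item is scheduled at most once per batch.
	 */
	if (llist_add(entry, &example_freelist))
		schedule_work(&example_work);
}

The patch below applies this pattern to HugeTLB pages, reusing page->mapping
as the llist_node and calling __update_and_free_page() from the work function.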

Link: https://lkml.kernel.org/r/20210510030027.56044-6-songmuchun@bytedance.com
Signed-off-by: Muchun Song <songmuchun@bytedance.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: Barry Song <song.bao.hua@hisilicon.com>
Cc: Bodeddula Balasubramaniam <bodeddub@amazon.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Chen Huang <chenhuang5@huawei.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: HORIGUCHI NAOYA <naoya.horiguchi@nec.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Joao Martins <joao.m.martins@oracle.com>
Cc: Joerg Roedel <jroedel@suse.de>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mina Almasry <almasrymina@google.com>
Cc: Oliver Neukum <oneukum@suse.com>
Cc: Paul E. McKenney <paulmck@kernel.org>
Cc: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Randy Dunlap <rdunlap@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Xiongchun Duan <duanxiongchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent f41f2ed4
@@ -1376,7 +1376,7 @@ static void remove_hugetlb_page(struct hstate *h, struct page *page,
h->nr_huge_pages_node[nid]--;
}
static void update_and_free_page(struct hstate *h, struct page *page)
static void __update_and_free_page(struct hstate *h, struct page *page)
{
int i;
struct page *subpage = page;
@@ -1399,12 +1399,79 @@ static void update_and_free_page(struct hstate *h, struct page *page)
}
}
/*
* Because update_and_free_page() can be called from any context, we cannot
* use GFP_KERNEL to allocate the vmemmap pages.  Instead, we defer the
* actual freeing to a workqueue so that the vmemmap pages do not have to be
* allocated with GFP_ATOMIC.
*
* free_hpage_workfn() locklessly retrieves the linked list of pages to be
* freed and frees them one-by-one. As the page->mapping pointer is going
* to be cleared in free_hpage_workfn() anyway, it is reused as the llist_node
* structure of a lockless linked list of huge pages to be freed.
*/
static LLIST_HEAD(hpage_freelist);
static void free_hpage_workfn(struct work_struct *work)
{
struct llist_node *node;
node = llist_del_all(&hpage_freelist);
while (node) {
struct page *page;
struct hstate *h;
page = container_of((struct address_space **)node,
struct page, mapping);
node = node->next;
page->mapping = NULL;
/*
* The VM_BUG_ON_PAGE(!PageHuge(page), page) in page_hstate()
* is going to trigger because a previous call to
* remove_hugetlb_page() will set_compound_page_dtor(page,
* NULL_COMPOUND_DTOR), so do not use page_hstate() directly.
*/
h = size_to_hstate(page_size(page));
__update_and_free_page(h, page);
cond_resched();
}
}
static DECLARE_WORK(free_hpage_work, free_hpage_workfn);
static inline void flush_free_hpage_work(struct hstate *h)
{
if (free_vmemmap_pages_per_hpage(h))
flush_work(&free_hpage_work);
}
static void update_and_free_page(struct hstate *h, struct page *page,
bool atomic)
{
if (!free_vmemmap_pages_per_hpage(h) || !atomic) {
__update_and_free_page(h, page);
return;
}
/*
* Defer freeing to avoid using GFP_ATOMIC to allocate vmemmap pages.
*
* Only call schedule_work() if hpage_freelist was previously
* empty. Otherwise, schedule_work() has already been called but
* the workfn has not retrieved the list yet.
*/
if (llist_add((struct llist_node *)&page->mapping, &hpage_freelist))
schedule_work(&free_hpage_work);
}
static void update_and_free_pages_bulk(struct hstate *h, struct list_head *list)
{
struct page *page, *t_page;
list_for_each_entry_safe(page, t_page, list, lru) {
update_and_free_page(h, page);
update_and_free_page(h, page, false);
cond_resched();
}
}
@@ -1471,12 +1538,12 @@ void free_huge_page(struct page *page)
if (HPageTemporary(page)) {
remove_hugetlb_page(h, page, false);
spin_unlock_irqrestore(&hugetlb_lock, flags);
update_and_free_page(h, page);
update_and_free_page(h, page, true);
} else if (h->surplus_huge_pages_node[nid]) {
/* remove the page from active list */
remove_hugetlb_page(h, page, true);
spin_unlock_irqrestore(&hugetlb_lock, flags);
update_and_free_page(h, page);
update_and_free_page(h, page, true);
} else {
arch_clear_hugepage_flags(page);
enqueue_huge_page(h, page);
@@ -1795,7 +1862,7 @@ int dissolve_free_huge_page(struct page *page)
remove_hugetlb_page(h, head, false);
h->max_huge_pages--;
spin_unlock_irq(&hugetlb_lock);
update_and_free_page(h, head);
update_and_free_page(h, head, false);
return 0;
}
out:
@@ -2411,14 +2478,14 @@ static int alloc_and_dissolve_huge_page(struct hstate *h, struct page *old_page,
* Pages have been replaced, we can safely free the old one.
*/
spin_unlock_irq(&hugetlb_lock);
update_and_free_page(h, old_page);
update_and_free_page(h, old_page, false);
}
return ret;
free_new:
spin_unlock_irq(&hugetlb_lock);
update_and_free_page(h, new_page);
update_and_free_page(h, new_page, false);
return ret;
}
@@ -2832,6 +2899,7 @@ static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
* pages in hstate via the proc/sysfs interfaces.
*/
mutex_lock(&h->resize_lock);
flush_free_hpage_work(h);
spin_lock_irq(&hugetlb_lock);
/*
@@ -2941,6 +3009,7 @@ static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
/* free the pages after dropping lock */
spin_unlock_irq(&hugetlb_lock);
update_and_free_pages_bulk(h, &page_list);
flush_free_hpage_work(h);
spin_lock_irq(&hugetlb_lock);
while (count < persistent_huge_pages(h)) {
@@ -180,18 +180,6 @@
#define RESERVE_VMEMMAP_NR 2U
#define RESERVE_VMEMMAP_SIZE (RESERVE_VMEMMAP_NR << PAGE_SHIFT)
/*
* How many vmemmap pages associated with a HugeTLB page can be freed
* to the buddy allocator.
*
* Todo: Returns zero for now, which means the feature is disabled. We will
* enable it once all the infrastructure is there.
*/
static inline unsigned int free_vmemmap_pages_per_hpage(struct hstate *h)
{
return 0;
}
static inline unsigned long free_vmemmap_pages_size_per_hpage(struct hstate *h)
{
return (unsigned long)free_vmemmap_pages_per_hpage(h) << PAGE_SHIFT;
@@ -12,9 +12,26 @@
#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
void free_huge_page_vmemmap(struct hstate *h, struct page *head);
/*
* How many vmemmap pages associated with a HugeTLB page can be freed
* to the buddy allocator.
*
* Todo: Returns zero for now, which means the feature is disabled. We will
* enable it once all the infrastructure is there.
*/
static inline unsigned int free_vmemmap_pages_per_hpage(struct hstate *h)
{
return 0;
}
#else
static inline void free_huge_page_vmemmap(struct hstate *h, struct page *head)
{
}
static inline unsigned int free_vmemmap_pages_per_hpage(struct hstate *h)
{
return 0;
}
#endif /* CONFIG_HUGETLB_PAGE_FREE_VMEMMAP */
#endif /* _LINUX_HUGETLB_VMEMMAP_H */