Commit 1121828a authored by Mike Kravetz, committed by Linus Torvalds

hugetlb: call update_and_free_page without hugetlb_lock

With the introduction of remove_hugetlb_page(), there is no need for
update_and_free_page to hold the hugetlb lock.  Change all callers to
drop the lock before calling.

With additional code modifications, this will allow loops which decrease
the huge page pool to drop the hugetlb_lock with each page to reduce
long hold times.

The ugly unlock/lock cycle in free_pool_huge_page will be removed in a
subsequent patch which restructures free_pool_huge_page.
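
As an illustration of the pattern this patch moves toward, below is a minimal userspace C sketch, not kernel code: pool_lock, pool_remove() and slow_free() are hypothetical stand-ins for hugetlb_lock, remove_hugetlb_page() and update_and_free_page(). The bookkeeping that needs the lock is done first, the lock is dropped, and only then is the expensive free performed, keeping lock hold times short.

/*
 * Sketch of the locking pattern (userspace analogue, hypothetical names):
 * unlink the object from shared bookkeeping under the lock, then drop the
 * lock before doing the expensive free.
 */
#include <pthread.h>
#include <stdlib.h>

struct page_obj {
	struct page_obj *next;
	void *mem;
};

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static struct page_obj *pool_head;
static unsigned long pool_count;

/* Unlink @p from the pool; caller must hold pool_lock. */
static void pool_remove(struct page_obj *p)
{
	struct page_obj **pp = &pool_head;

	while (*pp && *pp != p)
		pp = &(*pp)->next;
	if (*pp) {
		*pp = p->next;
		pool_count--;
	}
}

/* Stand-in for the expensive part (update_and_free_page in the patch). */
static void slow_free(struct page_obj *p)
{
	free(p->mem);
	free(p);
}

void release_page(struct page_obj *p)
{
	pthread_mutex_lock(&pool_lock);
	pool_remove(p);                  /* bookkeeping needs the lock    */
	pthread_mutex_unlock(&pool_lock);
	slow_free(p);                    /* expensive work, lock dropped  */
}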

Link: https://lkml.kernel.org/r/20210409205254.242291-6-mike.kravetz@oracle.com
Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Reviewed-by: Muchun Song <songmuchun@bytedance.com>
Reviewed-by: Miaohe Lin <linmiaohe@huawei.com>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Cc: "Aneesh Kumar K . V" <aneesh.kumar@linux.ibm.com>
Cc: Barry Song <song.bao.hua@hisilicon.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Hillf Danton <hdanton@sina.com>
Cc: HORIGUCHI NAOYA <naoya.horiguchi@nec.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mina Almasry <almasrymina@google.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Roman Gushchin <guro@fb.com>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Waiman Long <longman@redhat.com>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 6eb4e88a
@@ -1451,16 +1451,18 @@ static void __free_huge_page(struct page *page)
 
         if (HPageTemporary(page)) {
                 remove_hugetlb_page(h, page, false);
+                spin_unlock(&hugetlb_lock);
                 update_and_free_page(h, page);
         } else if (h->surplus_huge_pages_node[nid]) {
                 /* remove the page from active list */
                 remove_hugetlb_page(h, page, true);
+                spin_unlock(&hugetlb_lock);
                 update_and_free_page(h, page);
         } else {
                 arch_clear_hugepage_flags(page);
                 enqueue_huge_page(h, page);
+                spin_unlock(&hugetlb_lock);
         }
-        spin_unlock(&hugetlb_lock);
 }
 
 /*
@@ -1741,7 +1743,13 @@ static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
                                 list_entry(h->hugepage_freelists[node].next,
                                            struct page, lru);
                         remove_hugetlb_page(h, page, acct_surplus);
+                        /*
+                         * unlock/lock around update_and_free_page is temporary
+                         * and will be removed with subsequent patch.
+                         */
+                        spin_unlock(&hugetlb_lock);
                         update_and_free_page(h, page);
+                        spin_lock(&hugetlb_lock);
                         ret = 1;
                         break;
                 }
@@ -1810,8 +1818,9 @@ int dissolve_free_huge_page(struct page *page)
                 }
                 remove_hugetlb_page(h, page, false);
                 h->max_huge_pages--;
+                spin_unlock(&hugetlb_lock);
                 update_and_free_page(h, head);
-                rc = 0;
+                return 0;
         }
 out:
         spin_unlock(&hugetlb_lock);
@@ -2563,22 +2572,34 @@ static void try_to_free_low(struct hstate *h, unsigned long count,
                                                 nodemask_t *nodes_allowed)
 {
         int i;
+        struct page *page, *next;
+        LIST_HEAD(page_list);
 
         if (hstate_is_gigantic(h))
                 return;
 
+        /*
+         * Collect pages to be freed on a list, and free after dropping lock
+         */
         for_each_node_mask(i, *nodes_allowed) {
-                struct page *page, *next;
                 struct list_head *freel = &h->hugepage_freelists[i];
                 list_for_each_entry_safe(page, next, freel, lru) {
                         if (count >= h->nr_huge_pages)
-                                return;
+                                goto out;
                         if (PageHighMem(page))
                                 continue;
                         remove_hugetlb_page(h, page, false);
-                        update_and_free_page(h, page);
+                        list_add(&page->lru, &page_list);
                 }
         }
+
+out:
+        spin_unlock(&hugetlb_lock);
+        list_for_each_entry_safe(page, next, &page_list, lru) {
+                update_and_free_page(h, page);
+                cond_resched();
+        }
+        spin_lock(&hugetlb_lock);
 }
 #else
 static inline void try_to_free_low(struct hstate *h, unsigned long count,
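
The try_to_free_low() hunk above also batches the work: pages are moved to a local list while the lock is held and are freed only after it is dropped, with cond_resched() between frees. Below is a rough userspace C sketch of that batching pattern, not kernel code: pool_lock, pool_head and shrink_pool() are hypothetical names, and sched_yield() stands in for cond_resched().

/*
 * Batching sketch (userspace analogue, hypothetical names): move victims to
 * a private list under the lock, drop the lock, then free them one by one,
 * yielding between frees the way the patch uses cond_resched().
 */
#include <pthread.h>
#include <sched.h>
#include <stdlib.h>

struct node {
	struct node *next;
};

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *pool_head;
static unsigned long pool_count;

/* Shrink the pool down to @target entries. */
void shrink_pool(unsigned long target)
{
	struct node *batch = NULL, *n;

	pthread_mutex_lock(&pool_lock);
	while (pool_count > target && pool_head) {
		n = pool_head;              /* unlink under the lock...    */
		pool_head = n->next;
		pool_count--;
		n->next = batch;            /* ...and park on a local list */
		batch = n;
	}
	pthread_mutex_unlock(&pool_lock);

	while (batch) {                     /* free with the lock dropped  */
		n = batch;
		batch = n->next;
		free(n);
		sched_yield();              /* analogue of cond_resched()  */
	}
}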