Commit 0bd0f9fb authored by Eric Paris, committed by Linus Torvalds

[PATCH] hugetlb: fix race in set_max_huge_pages for multiple updaters of nr_huge_pages

If there are multiple updaters to /proc/sys/vm/nr_hugepages running
simultaneously, it is possible for the nr_huge_pages variable to become
incorrect.  There is no locking in set_max_huge_pages around
alloc_fresh_huge_page, which updates nr_huge_pages.  Two callers of
alloc_fresh_huge_page can race against each other, as can a call to
alloc_fresh_huge_page and a call to update_and_free_page.  This patch just
expands the area covered by hugetlb_lock to cover the counter updates made
by alloc_fresh_huge_page.  I don't see how a sysctl path could be called
performance critical, where more specific locking would be needed.
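
The corruption comes from an unlocked read-modify-write: nr_huge_pages++
is a load, an increment, and a store, so two concurrent updaters can each
load the same old value and one increment is lost.  A minimal userspace
sketch of the same failure (hypothetical demo code, not from the kernel;
the mutex plays the role of hugetlb_lock):

/* racedemo.c - userspace analogue of the nr_huge_pages race.
 * Two threads bump a shared counter; without the mutex, concurrent
 * read-modify-writes lose updates, just as concurrent callers of
 * alloc_fresh_huge_page() could corrupt nr_huge_pages.
 */
#include <pthread.h>
#include <stdio.h>

#define ITERS 1000000

static unsigned long counter;	/* stands in for nr_huge_pages */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int use_lock;		/* 0 = racy (before patch), 1 = locked (after) */

static void *updater(void *arg)
{
	for (int i = 0; i < ITERS; i++) {
		if (use_lock)
			pthread_mutex_lock(&lock);
		counter++;	/* non-atomic read-modify-write */
		if (use_lock)
			pthread_mutex_unlock(&lock);
	}
	return NULL;
}

int main(void)
{
	for (use_lock = 0; use_lock <= 1; use_lock++) {
		pthread_t a, b;
		counter = 0;
		pthread_create(&a, NULL, updater, NULL);
		pthread_create(&b, NULL, updater, NULL);
		pthread_join(a, NULL);
		pthread_join(b, NULL);
		printf("%s: counter = %lu (expected %d)\n",
		       use_lock ? "locked  " : "unlocked", counter, 2 * ITERS);
	}
	return 0;
}

Build with gcc -pthread; on a multiprocessor the unlocked pass typically
reports far fewer than 2000000, while the locked pass is always exact.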

My reproducer was to run a couple of copies of the following script
simultaneously:

while [ true ]; do
	echo 1000 > /proc/sys/vm/nr_hugepages
	echo 500 > /proc/sys/vm/nr_hugepages
	echo 750 > /proc/sys/vm/nr_hugepages
	echo 100 > /proc/sys/vm/nr_hugepages
	echo 0 > /proc/sys/vm/nr_hugepages
done

and then watch /proc/meminfo; eventually you will see impossible states like

HugePages_Total:     100
HugePages_Free:      109
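
A convenient way to monitor the counters while the scripts run is a
polling one-liner such as (any equivalent works):

	watch -n1 'grep Huge /proc/meminfo'

HugePages_Free can never legitimately exceed HugePages_Total, so a state
like 109 free out of 100 total is direct evidence of lost counter updates.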

After applying the patch all seemed well.
Signed-off-by: Eric Paris <eparis@redhat.com>
Acked-by: William Irwin <wli@holomorphy.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 5ef897c7
@@ -22,6 +22,10 @@ unsigned long max_huge_pages;
 static struct list_head hugepage_freelists[MAX_NUMNODES];
 static unsigned int nr_huge_pages_node[MAX_NUMNODES];
 static unsigned int free_huge_pages_node[MAX_NUMNODES];
+
+/*
+ * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
+ */
 static DEFINE_SPINLOCK(hugetlb_lock);
 
 static void enqueue_huge_page(struct page *page)
@@ -61,8 +65,10 @@ static struct page *alloc_fresh_huge_page(void)
 					HUGETLB_PAGE_ORDER);
 	nid = (nid + 1) % num_online_nodes();
 	if (page) {
+		spin_lock(&hugetlb_lock);
 		nr_huge_pages++;
 		nr_huge_pages_node[page_to_nid(page)]++;
+		spin_unlock(&hugetlb_lock);
 	}
 	return page;
 }
...
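
Reading the second hunk in context, alloc_fresh_huge_page() after the patch
looks roughly like the sketch below.  The function prologue and the gfp
flags on the alloc_pages_node() call are truncated in the diff view, so
those lines (the static nid, GFP_HIGHUSER|__GFP_COMP|__GFP_NOWARN) are
reconstructions of the surrounding code, not part of the hunk:

static struct page *alloc_fresh_huge_page(void)
{
	static int nid = 0;	/* reconstructed; not shown in the hunk */
	struct page *page;

	/* gfp flags assumed; the hunk only shows the HUGETLB_PAGE_ORDER line */
	page = alloc_pages_node(nid, GFP_HIGHUSER|__GFP_COMP|__GFP_NOWARN,
					HUGETLB_PAGE_ORDER);
	nid = (nid + 1) % num_online_nodes();
	if (page) {
		/* the fix: counter updates are now serialized by hugetlb_lock */
		spin_lock(&hugetlb_lock);
		nr_huge_pages++;
		nr_huge_pages_node[page_to_nid(page)]++;
		spin_unlock(&hugetlb_lock);
	}
	return page;
}

Note that the lock is taken inside alloc_fresh_huge_page() around only the
counter updates, so the page allocation itself stays outside the critical
section.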