Commit 79789db0 authored by Matthew Wilcox (Oracle), committed by Linus Torvalds

mm: Make copy_huge_page() always available

Rewrite copy_huge_page() and move it into mm/util.c so it's always
available.  Fixes an exposure of uninitialised memory on configurations
with HUGETLB and UFFD enabled and MIGRATION disabled.

Fixes: 8cc5fcbb ("mm, hugetlb: fix racy resv_huge_pages underflow on UFFDIO_COPY")
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent efdb6720
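
For context, a hypothetical userspace sketch of the affected path (illustration only, not a reproducer taken from the commit). It assumes a kernel built with CONFIG_HUGETLB_PAGE=y and CONFIG_USERFAULTFD=y but CONFIG_MIGRATION=n, x86-64 2MB hugetlb pages reserved via /proc/sys/vm/nr_hugepages, and omits error handling. UFFDIO_COPY into a registered hugetlb range is what ultimately reaches copy_huge_page(); with the pre-fix empty stub, the freshly allocated huge page could be mapped without ever being written, so userspace could read stale contents.

#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

#define HUGE_SZ	(2UL << 20)	/* assumed 2MB hugetlb page size */

int main(void)
{
	int uffd = syscall(__NR_userfaultfd, O_CLOEXEC);
	struct uffdio_api api = { .api = UFFD_API };
	ioctl(uffd, UFFDIO_API, &api);

	char *dst = mmap(NULL, HUGE_SZ, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	char *src = mmap(NULL, HUGE_SZ, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	struct uffdio_register reg = {
		.range = { .start = (unsigned long)dst, .len = HUGE_SZ },
		.mode = UFFDIO_REGISTER_MODE_MISSING,
	};
	ioctl(uffd, UFFDIO_REGISTER, &reg);

	/*
	 * Keep the source non-resident so the kernel's first, non-faulting
	 * copy attempt fails and it retries via the fallback that calls
	 * copy_huge_page() on a freshly allocated huge page.
	 */
	madvise(src, HUGE_SZ, MADV_DONTNEED);

	struct uffdio_copy copy = {
		.dst = (unsigned long)dst,
		.src = (unsigned long)src,
		.len = HUGE_SZ,
	};
	ioctl(uffd, UFFDIO_COPY, &copy);

	/*
	 * src reads back as zeroes here, so any nonzero byte in dst is
	 * leaked uninitialised memory (what the pre-fix stub exposed).
	 */
	for (size_t off = 0; off < HUGE_SZ; off++)
		if (dst[off])
			printf("nonzero byte at offset %zu\n", off);
	return 0;
}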
include/linux/migrate.h
@@ -51,7 +51,6 @@ extern int migrate_huge_page_move_mapping(struct address_space *mapping,
 				  struct page *newpage, struct page *page);
 extern int migrate_page_move_mapping(struct address_space *mapping,
 		struct page *newpage, struct page *page, int extra_count);
-extern void copy_huge_page(struct page *dst, struct page *src);
 #else
 
 static inline void putback_movable_pages(struct list_head *l) {}
@@ -77,10 +76,6 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
 {
 	return -ENOSYS;
 }
-
-static inline void copy_huge_page(struct page *dst, struct page *src)
-{
-}
 
 #endif /* CONFIG_MIGRATION */
 
 #ifdef CONFIG_COMPACTION
include/linux/mm.h
@@ -906,6 +906,7 @@ void __put_page(struct page *page);
 void put_pages_list(struct list_head *pages);
 
 void split_page(struct page *page, unsigned int order);
+void copy_huge_page(struct page *dst, struct page *src);
 
 /*
  * Compound pages have a destructor function.  Provide a
mm/migrate.c
@@ -536,54 +536,6 @@ int migrate_huge_page_move_mapping(struct address_space *mapping,
 	return MIGRATEPAGE_SUCCESS;
 }
 
-/*
- * Gigantic pages are so large that we do not guarantee that page++ pointer
- * arithmetic will work across the entire page.  We need something more
- * specialized.
- */
-static void __copy_gigantic_page(struct page *dst, struct page *src,
-				int nr_pages)
-{
-	int i;
-	struct page *dst_base = dst;
-	struct page *src_base = src;
-
-	for (i = 0; i < nr_pages; ) {
-		cond_resched();
-		copy_highpage(dst, src);
-
-		i++;
-		dst = mem_map_next(dst, dst_base, i);
-		src = mem_map_next(src, src_base, i);
-	}
-}
-
-void copy_huge_page(struct page *dst, struct page *src)
-{
-	int i;
-	int nr_pages;
-
-	if (PageHuge(src)) {
-		/* hugetlbfs page */
-		struct hstate *h = page_hstate(src);
-		nr_pages = pages_per_huge_page(h);
-
-		if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) {
-			__copy_gigantic_page(dst, src, nr_pages);
-			return;
-		}
-	} else {
-		/* thp page */
-		BUG_ON(!PageTransHuge(src));
-		nr_pages = thp_nr_pages(src);
-	}
-
-	for (i = 0; i < nr_pages; i++) {
-		cond_resched();
-		copy_highpage(dst + i, src + i);
-	}
-}
-
 /*
  * Copy the page to its new location
  */
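
Worth noting for the removed mm/migrate.c code above: __copy_gigantic_page() existed because, under CONFIG_SPARSEMEM without vmemmap, the struct page array backing a gigantic page is not guaranteed to be virtually contiguous, so `page + i` arithmetic can cross a memmap section boundary. The rewrite below sidesteps the special case by indexing through nth_page() on every iteration; at the time, the mainline helper in include/linux/mm.h was essentially:

/*
 * Sketch of nth_page(): with a sparse, non-vmemmap memmap it must
 * translate through the pfn; otherwise plain pointer arithmetic is fine.
 */
#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define nth_page(page, n)	pfn_to_page(page_to_pfn((page)) + (n))
#else
#define nth_page(page, n)	((page) + (n))
#endif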
mm/util.c
@@ -731,6 +731,16 @@ int __page_mapcount(struct page *page)
 }
 EXPORT_SYMBOL_GPL(__page_mapcount);
 
+void copy_huge_page(struct page *dst, struct page *src)
+{
+	unsigned i, nr = compound_nr(src);
+
+	for (i = 0; i < nr; i++) {
+		cond_resched();
+		copy_highpage(nth_page(dst, i), nth_page(src, i));
+	}
+}
+
 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
 int sysctl_overcommit_ratio __read_mostly = 50;
 unsigned long sysctl_overcommit_kbytes __read_mostly;
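
A note on the replacement: compound_nr() gives the number of base pages for any compound page, hugetlbfs or THP alike, so the old PageHuge()/PageTransHuge() branching disappears, and the per-subpage cond_resched() keeps the loop preemptible even when copying a 1GB gigantic page. A rough sketch of the semantics (hypothetical helper name, not the kernel's own definition):

/*
 * Semantics sketch: for the head of an order-N compound page this
 * evaluates to 1UL << N (512 for a 2MB x86-64 hugepage, 262144 for a
 * 1GB gigantic page) and to 1 for a base page.
 */
static inline unsigned long compound_nr_sketch(struct page *page)
{
	return PageHead(page) ? 1UL << compound_order(page) : 1;
}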