Commit 668f9abb authored by David Rientjes, committed by Linus Torvalds

mm: close PageTail race

Commit bf6bddf1 ("mm: introduce compaction and migration for
ballooned pages") introduces page_count(page) into memory compaction
which dereferences page->first_page if PageTail(page).

This results in a very rare NULL pointer dereference on the
aforementioned page_count(page).  Indeed, anything that does
compound_head(), including page_count(), is susceptible to racing with
prep_compound_page() and seeing a NULL or dangling page->first_page
pointer.
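
To make the window concrete: the old prep_compound_page() set PageTail
before publishing first_page, so a racing reader could observe roughly
the following (an illustrative interleaving condensed from the hunks
below, not text from the tree):

	CPU 0 (prep_compound_page)	CPU 1 (compound_head)
	__SetPageTail(p);
					if (unlikely(PageTail(page)))
						/* sees the tail bit... */
						return page->first_page;
						/* ...NULL or stale! */
	p->first_page = page;		/* published too late */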

This patch uses Andrea's implementation of compound_trans_head() that
deals with such a race and makes it the default compound_head()
implementation.  This includes a read memory barrier which ensures
that, if PageTail(page) is still true after the barrier, we return a
head page that is neither NULL nor dangling.  The patch then adds a
store memory barrier to prep_compound_page() to ensure page->first_page
is set before the page is marked as a tail.

This is the safest way to ensure we see the head page we are expecting;
PageTail(page) is already in the unlikely() path, and the memory
barriers are unfortunately required.
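
Condensed from the mm/page_alloc.c and include/linux/mm.h hunks below,
the two barriers pair up like this (a sketch of the ordering only):

	writer (prep_compound_page)	reader (compound_head)
	p->first_page = page;		head = page->first_page;
	smp_wmb();			smp_rmb();
	__SetPageTail(p);		if (PageTail(page))
						return head;

If the reader still sees PageTail after the smp_rmb(), head is neither
NULL (the writer published first_page before setting the tail flag) nor
dangling (__split_huge_page_refcount() clears PageTail before it
overwrites first_page).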

Hugetlbfs is the exception: we don't enforce a store memory barrier
during init since no race is possible there.
Signed-off-by: David Rientjes <rientjes@google.com>
Cc: Holger Kiehl <Holger.Kiehl@dwd.de>
Cc: Christoph Lameter <cl@linux.com>
Cc: Rafael Aquini <aquini@redhat.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent aa15aa0e
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -874,7 +874,7 @@ bio_pageinc(struct bio *bio)
 		/* Non-zero page count for non-head members of
 		 * compound pages is no longer allowed by the kernel.
 		 */
-		page = compound_trans_head(bv.bv_page);
+		page = compound_head(bv.bv_page);
 		atomic_inc(&page->_count);
 	}
 }
@@ -887,7 +887,7 @@ bio_pagedec(struct bio *bio)
 	struct bvec_iter iter;
 
 	bio_for_each_segment(bv, bio, iter) {
-		page = compound_trans_head(bv.bv_page);
+		page = compound_head(bv.bv_page);
 		atomic_dec(&page->_count);
 	}
 }
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -186,12 +186,12 @@ static bool is_invalid_reserved_pfn(unsigned long pfn)
 	if (pfn_valid(pfn)) {
 		bool reserved;
 		struct page *tail = pfn_to_page(pfn);
-		struct page *head = compound_trans_head(tail);
+		struct page *head = compound_head(tail);
 		reserved = !!(PageReserved(head));
 		if (head != tail) {
 			/*
 			 * "head" is not a dangling pointer
-			 * (compound_trans_head takes care of that)
+			 * (compound_head takes care of that)
 			 * but the hugepage may have been split
 			 * from under us (and we may not hold a
 			 * reference count on the head page so it can
--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -121,9 +121,8 @@ u64 stable_page_flags(struct page *page)
 	 * just checks PG_head/PG_tail, so we need to check PageLRU/PageAnon
 	 * to make sure a given page is a thp, not a non-huge compound page.
 	 */
-	else if (PageTransCompound(page) &&
-		 (PageLRU(compound_trans_head(page)) ||
-		  PageAnon(compound_trans_head(page))))
+	else if (PageTransCompound(page) && (PageLRU(compound_head(page)) ||
+					     PageAnon(compound_head(page))))
 		u |= 1 << KPF_THP;
 
 	/*
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -157,46 +157,6 @@ static inline int hpage_nr_pages(struct page *page)
 		return HPAGE_PMD_NR;
 	return 1;
 }
-/*
- * compound_trans_head() should be used instead of compound_head(),
- * whenever the "page" passed as parameter could be the tail of a
- * transparent hugepage that could be undergoing a
- * __split_huge_page_refcount(). The page structure layout often
- * changes across releases and it makes extensive use of unions. So if
- * the page structure layout will change in a way that
- * page->first_page gets clobbered by __split_huge_page_refcount, the
- * implementation making use of smp_rmb() will be required.
- *
- * Currently we define compound_trans_head as compound_head, because
- * page->private is in the same union with page->first_page, and
- * page->private isn't clobbered. However this also means we're
- * currently leaving dirt into the page->private field of anonymous
- * pages resulting from a THP split, instead of setting page->private
- * to zero like for every other page that has PG_private not set. But
- * anonymous pages don't use page->private so this is not a problem.
- */
-#if 0
-/* This will be needed if page->private will be clobbered in split_huge_page */
-static inline struct page *compound_trans_head(struct page *page)
-{
-	if (PageTail(page)) {
-		struct page *head;
-		head = page->first_page;
-		smp_rmb();
-		/*
-		 * head may be a dangling pointer.
-		 * __split_huge_page_refcount clears PageTail before
-		 * overwriting first_page, so if PageTail is still
-		 * there it means the head pointer isn't dangling.
-		 */
-		if (PageTail(page))
-			return head;
-	}
-	return page;
-}
-#else
-#define compound_trans_head(page) compound_head(page)
-#endif
 
 extern int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 				 unsigned long addr, pmd_t pmd, pmd_t *pmdp);
@@ -226,7 +186,6 @@ static inline int split_huge_page(struct page *page)
 			do { } while (0)
 #define split_huge_page_pmd_mm(__mm, __address, __pmd)	\
 	do { } while (0)
-#define compound_trans_head(page) compound_head(page)
 static inline int hugepage_madvise(struct vm_area_struct *vma,
 				   unsigned long *vm_flags, int advice)
 {
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -399,8 +399,18 @@ static inline void compound_unlock_irqrestore(struct page *page,
 
 static inline struct page *compound_head(struct page *page)
 {
-	if (unlikely(PageTail(page)))
-		return page->first_page;
+	if (unlikely(PageTail(page))) {
+		struct page *head = page->first_page;
+
+		/*
+		 * page->first_page may be a dangling pointer to an old
+		 * compound page, so recheck that it is still a tail
+		 * page before returning.
+		 */
+		smp_rmb();
+		if (likely(PageTail(page)))
+			return head;
+	}
 	return page;
 }
 
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -444,7 +444,7 @@ static void break_cow(struct rmap_item *rmap_item)
 static struct page *page_trans_compound_anon(struct page *page)
 {
 	if (PageTransCompound(page)) {
-		struct page *head = compound_trans_head(page);
+		struct page *head = compound_head(page);
 		/*
 		 * head may actually be splitted and freed from under
 		 * us but it's ok here.
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1651,7 +1651,7 @@ int soft_offline_page(struct page *page, int flags)
 {
 	int ret;
 	unsigned long pfn = page_to_pfn(page);
-	struct page *hpage = compound_trans_head(page);
+	struct page *hpage = compound_head(page);
 
 	if (PageHWPoison(page)) {
 		pr_info("soft offline: %#lx page already poisoned\n", pfn);
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -369,9 +369,11 @@ void prep_compound_page(struct page *page, unsigned long order)
 	__SetPageHead(page);
 	for (i = 1; i < nr_pages; i++) {
 		struct page *p = page + i;
-		__SetPageTail(p);
 		set_page_count(p, 0);
 		p->first_page = page;
+		/* Make sure p->first_page is always valid for PageTail() */
+		smp_wmb();
+		__SetPageTail(p);
 	}
 }
 
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -98,7 +98,7 @@ static void put_compound_page(struct page *page)
 	}
 
 	/* __split_huge_page_refcount can run under us */
-	page_head = compound_trans_head(page);
+	page_head = compound_head(page);
 
 	/*
 	 * THP can not break up slab pages so avoid taking
@@ -253,7 +253,7 @@ bool __get_page_tail(struct page *page)
 	 */
 	unsigned long flags;
 	bool got;
-	struct page *page_head = compound_trans_head(page);
+	struct page *page_head = compound_head(page);
 
 	/* Ref to put_compound_page() comment. */
 	if (!__compound_tail_refcounted(page_head)) {