Commit 27c73ae7 authored by Andrea Arcangeli, committed by Linus Torvalds

mm: hugetlbfs: fix hugetlbfs optimization

Commit 7cb2ef56 ("mm: fix aio performance regression for database
caused by THP") can cause dereference of a dangling pointer if
split_huge_page runs during PageHuge() if there are updates to the
tail_page->private field.

Also it is repeating compound_head twice for hugetlbfs and it is running
compound_head+compound_trans_head for THP when a single one is needed in
both cases.

The new code within the PageSlab() check doesn't need to verify that the
THP page size is never bigger than the smallest hugetlbfs page size, to
avoid memory corruption.

A longstanding theoretical race condition was found while fixing the
above (see the change right after the skip_unlock label, that is
relevant for the compound_lock path too).

By re-establishing the _mapcount tail refcounting for all compound
pages, this also fixes the below problem:

  echo 0 >/sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages

  BUG: Bad page state in process bash  pfn:59a01
  page:ffffea000139b038 count:0 mapcount:10 mapping:          (null) index:0x0
  page flags: 0x1c00000000008000(tail)
  Modules linked in:
  CPU: 6 PID: 2018 Comm: bash Not tainted 3.12.0+ #25
  Hardware name: Bochs Bochs, BIOS Bochs 01/01/2011
  Call Trace:
    dump_stack+0x55/0x76
    bad_page+0xd5/0x130
    free_pages_prepare+0x213/0x280
    __free_pages+0x36/0x80
    update_and_free_page+0xc1/0xd0
    free_pool_huge_page+0xc2/0xe0
    set_max_huge_pages.part.58+0x14c/0x220
    nr_hugepages_store_common.isra.60+0xd0/0xf0
    nr_hugepages_store+0x13/0x20
    kobj_attr_store+0xf/0x20
    sysfs_write_file+0x189/0x1e0
    vfs_write+0xc5/0x1f0
    SyS_write+0x55/0xb0
    system_call_fastpath+0x16/0x1b
Signed-off-by: Khalid Aziz <khalid.aziz@oracle.com>
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Tested-by: Khalid Aziz <khalid.aziz@oracle.com>
Cc: Pravin Shelar <pshelar@nicira.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Ben Hutchings <bhutchings@solarflare.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Johannes Weiner <jweiner@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Rik van Riel <riel@redhat.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Minchan Kim <minchan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 044c8d4b
...@@ -31,6 +31,7 @@ struct hugepage_subpool *hugepage_new_subpool(long nr_blocks); ...@@ -31,6 +31,7 @@ struct hugepage_subpool *hugepage_new_subpool(long nr_blocks);
void hugepage_put_subpool(struct hugepage_subpool *spool); void hugepage_put_subpool(struct hugepage_subpool *spool);
int PageHuge(struct page *page); int PageHuge(struct page *page);
int PageHeadHuge(struct page *page_head);
void reset_vma_resv_huge_pages(struct vm_area_struct *vma); void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
...@@ -103,6 +104,11 @@ static inline int PageHuge(struct page *page) ...@@ -103,6 +104,11 @@ static inline int PageHuge(struct page *page)
return 0; return 0;
} }
/*
 * Stub of PageHeadHuge(): always reports "not a hugetlbfs head page".
 * NOTE(review): presumably this sits under the !CONFIG_HUGETLB_PAGE branch,
 * where no hugetlbfs pages can exist — confirm against the enclosing #ifdef,
 * which is outside this hunk.
 */
static inline int PageHeadHuge(struct page *page_head)
{
return 0;
}
static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma) static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{ {
} }
......
...@@ -702,6 +702,23 @@ int PageHuge(struct page *page) ...@@ -702,6 +702,23 @@ int PageHuge(struct page *page)
} }
EXPORT_SYMBOL_GPL(PageHuge); EXPORT_SYMBOL_GPL(PageHuge);
/*
 * PageHeadHuge() - is this page the head page of a hugetlbfs compound page?
 *
 * Returns nonzero only for a hugetlbfs head page; tail pages, normal pages
 * and transparent huge pages all return 0.  The check is made by comparing
 * the page's compound destructor against free_huge_page, which only
 * hugetlbfs installs.
 */
int PageHeadHuge(struct page *page_head)
{
	if (!PageHead(page_head))
		return 0;
	return get_compound_page_dtor(page_head) == free_huge_page;
}
EXPORT_SYMBOL_GPL(PageHeadHuge);
pgoff_t __basepage_index(struct page *page) pgoff_t __basepage_index(struct page *page)
{ {
struct page *page_head = compound_head(page); struct page *page_head = compound_head(page);
......
...@@ -82,19 +82,6 @@ static void __put_compound_page(struct page *page) ...@@ -82,19 +82,6 @@ static void __put_compound_page(struct page *page)
static void put_compound_page(struct page *page) static void put_compound_page(struct page *page)
{ {
/*
* hugetlbfs pages cannot be split from under us. If this is a
* hugetlbfs page, check refcount on head page and release the page if
* the refcount becomes zero.
*/
if (PageHuge(page)) {
page = compound_head(page);
if (put_page_testzero(page))
__put_compound_page(page);
return;
}
if (unlikely(PageTail(page))) { if (unlikely(PageTail(page))) {
/* __split_huge_page_refcount can run under us */ /* __split_huge_page_refcount can run under us */
struct page *page_head = compound_trans_head(page); struct page *page_head = compound_trans_head(page);
...@@ -111,14 +98,31 @@ static void put_compound_page(struct page *page) ...@@ -111,14 +98,31 @@ static void put_compound_page(struct page *page)
* still hot on arches that do not support * still hot on arches that do not support
* this_cpu_cmpxchg_double(). * this_cpu_cmpxchg_double().
*/ */
if (PageSlab(page_head)) { if (PageSlab(page_head) || PageHeadHuge(page_head)) {
if (PageTail(page)) { if (likely(PageTail(page))) {
/*
* __split_huge_page_refcount
* cannot race here.
*/
VM_BUG_ON(!PageHead(page_head));
atomic_dec(&page->_mapcount);
if (put_page_testzero(page_head)) if (put_page_testzero(page_head))
VM_BUG_ON(1); VM_BUG_ON(1);
if (put_page_testzero(page_head))
atomic_dec(&page->_mapcount); __put_compound_page(page_head);
goto skip_lock_tail; return;
} else } else
/*
* __split_huge_page_refcount
* run before us, "page" was a
* THP tail. The split
* page_head has been freed
* and reallocated as slab or
* hugetlbfs page of smaller
* order (only possible if
* reallocated as slab on
* x86).
*/
goto skip_lock; goto skip_lock;
} }
/* /*
...@@ -132,8 +136,27 @@ static void put_compound_page(struct page *page) ...@@ -132,8 +136,27 @@ static void put_compound_page(struct page *page)
/* __split_huge_page_refcount run before us */ /* __split_huge_page_refcount run before us */
compound_unlock_irqrestore(page_head, flags); compound_unlock_irqrestore(page_head, flags);
skip_lock: skip_lock:
if (put_page_testzero(page_head)) if (put_page_testzero(page_head)) {
/*
* The head page may have been
* freed and reallocated as a
* compound page of smaller
* order and then freed again.
* All we know is that it
* cannot have become: a THP
* page, a compound page of
* higher order, a tail page.
* That is because we still
* hold the refcount of the
* split THP tail and
* page_head was the THP head
* before the split.
*/
if (PageHead(page_head))
__put_compound_page(page_head);
else
__put_single_page(page_head); __put_single_page(page_head);
}
out_put_single: out_put_single:
if (put_page_testzero(page)) if (put_page_testzero(page))
__put_single_page(page); __put_single_page(page);
...@@ -155,7 +178,6 @@ static void put_compound_page(struct page *page) ...@@ -155,7 +178,6 @@ static void put_compound_page(struct page *page)
VM_BUG_ON(atomic_read(&page->_count) != 0); VM_BUG_ON(atomic_read(&page->_count) != 0);
compound_unlock_irqrestore(page_head, flags); compound_unlock_irqrestore(page_head, flags);
skip_lock_tail:
if (put_page_testzero(page_head)) { if (put_page_testzero(page_head)) {
if (PageHead(page_head)) if (PageHead(page_head))
__put_compound_page(page_head); __put_compound_page(page_head);
...@@ -198,30 +220,32 @@ bool __get_page_tail(struct page *page) ...@@ -198,30 +220,32 @@ bool __get_page_tail(struct page *page)
* proper PT lock that already serializes against * proper PT lock that already serializes against
* split_huge_page(). * split_huge_page().
*/ */
bool got = false;
struct page *page_head;
/*
* If this is a hugetlbfs page it cannot be split under us. Simply
* increment refcount for the head page.
*/
if (PageHuge(page)) {
page_head = compound_head(page);
atomic_inc(&page_head->_count);
got = true;
} else {
unsigned long flags; unsigned long flags;
bool got = false;
struct page *page_head = compound_trans_head(page);
page_head = compound_trans_head(page); if (likely(page != page_head && get_page_unless_zero(page_head))) {
if (likely(page != page_head &&
get_page_unless_zero(page_head))) {
/* Ref to put_compound_page() comment. */ /* Ref to put_compound_page() comment. */
if (PageSlab(page_head)) { if (PageSlab(page_head) || PageHeadHuge(page_head)) {
if (likely(PageTail(page))) { if (likely(PageTail(page))) {
/*
* This is a hugetlbfs page or a slab
* page. __split_huge_page_refcount
* cannot race here.
*/
VM_BUG_ON(!PageHead(page_head));
__get_page_tail_foll(page, false); __get_page_tail_foll(page, false);
return true; return true;
} else { } else {
/*
* __split_huge_page_refcount run
* before us, "page" was a THP
* tail. The split page_head has been
* freed and reallocated as slab or
* hugetlbfs page of smaller order
* (only possible if reallocated as
* slab on x86).
*/
put_page(page_head); put_page(page_head);
return false; return false;
} }
...@@ -243,7 +267,6 @@ bool __get_page_tail(struct page *page) ...@@ -243,7 +267,6 @@ bool __get_page_tail(struct page *page)
if (unlikely(!got)) if (unlikely(!got))
put_page(page_head); put_page(page_head);
} }
}
return got; return got;
} }
EXPORT_SYMBOL(__get_page_tail); EXPORT_SYMBOL(__get_page_tail);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment