Commit 4231aba0 authored by Nicholas Piggin's avatar Nicholas Piggin Committed by Michael Ellerman

powerpc/64s: Fix page table fragment refcount race vs speculative references

The page table fragment allocator uses the main page refcount racily
with respect to speculative references. A customer observed a BUG due
to page table page refcount underflow in the fragment allocator. This
can be caused by the fragment allocator set_page_count stomping on a
speculative reference, and then the speculative failure handler
decrements the new reference, and the underflow eventually pops when
the page tables are freed.

Fix this by using a dedicated field in the struct page for the page
table fragment allocator.

Fixes: 5c1f6ee9 ("powerpc: Reduce PTE table memory wastage")
Cc: stable@vger.kernel.org # v3.10+
Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent a0ac3687
@@ -200,9 +200,9 @@ static void pte_frag_destroy(void *pte_frag)
 	/* drop all the pending references */
 	count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT;
 	/* We allow PTE_FRAG_NR fragments from a PTE page */
-	if (page_ref_sub_and_test(page, PTE_FRAG_NR - count)) {
+	if (atomic_sub_and_test(PTE_FRAG_NR - count, &page->pt_frag_refcount)) {
 		pgtable_page_dtor(page);
-		free_unref_page(page);
+		__free_page(page);
 	}
 }
@@ -215,9 +215,9 @@ static void pmd_frag_destroy(void *pmd_frag)
 	/* drop all the pending references */
 	count = ((unsigned long)pmd_frag & ~PAGE_MASK) >> PMD_FRAG_SIZE_SHIFT;
 	/* We allow PTE_FRAG_NR fragments from a PTE page */
-	if (page_ref_sub_and_test(page, PMD_FRAG_NR - count)) {
+	if (atomic_sub_and_test(PMD_FRAG_NR - count, &page->pt_frag_refcount)) {
 		pgtable_pmd_page_dtor(page);
-		free_unref_page(page);
+		__free_page(page);
 	}
 }
......
@@ -270,6 +270,8 @@ static pmd_t *__alloc_for_pmdcache(struct mm_struct *mm)
 		return NULL;
 	}
+
+	atomic_set(&page->pt_frag_refcount, 1);
 	ret = page_address(page);
 	/*
 	 * if we support only one fragment just return the
@@ -285,7 +287,7 @@ static pmd_t *__alloc_for_pmdcache(struct mm_struct *mm)
 	 * count.
 	 */
 	if (likely(!mm->context.pmd_frag)) {
-		set_page_count(page, PMD_FRAG_NR);
+		atomic_set(&page->pt_frag_refcount, PMD_FRAG_NR);
 		mm->context.pmd_frag = ret + PMD_FRAG_SIZE;
 	}
 	spin_unlock(&mm->page_table_lock);
@@ -308,9 +310,10 @@ void pmd_fragment_free(unsigned long *pmd)
 {
 	struct page *page = virt_to_page(pmd);
-	if (put_page_testzero(page)) {
+	BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
+	if (atomic_dec_and_test(&page->pt_frag_refcount)) {
 		pgtable_pmd_page_dtor(page);
-		free_unref_page(page);
+		__free_page(page);
 	}
 }
@@ -352,6 +355,7 @@ static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
 		return NULL;
 	}
+	atomic_set(&page->pt_frag_refcount, 1);
 	ret = page_address(page);
 	/*
@@ -367,7 +371,7 @@ static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
 	 * count.
 	 */
 	if (likely(!mm->context.pte_frag)) {
-		set_page_count(page, PTE_FRAG_NR);
+		atomic_set(&page->pt_frag_refcount, PTE_FRAG_NR);
 		mm->context.pte_frag = ret + PTE_FRAG_SIZE;
 	}
 	spin_unlock(&mm->page_table_lock);
@@ -390,10 +394,11 @@ void pte_fragment_free(unsigned long *table, int kernel)
 {
 	struct page *page = virt_to_page(table);
-	if (put_page_testzero(page)) {
+	BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
+	if (atomic_dec_and_test(&page->pt_frag_refcount)) {
 		if (!kernel)
 			pgtable_page_dtor(page);
-		free_unref_page(page);
+		__free_page(page);
 	}
 }
......
@@ -139,7 +139,10 @@ struct page {
 		unsigned long _pt_pad_1;	/* compound_head */
 		pgtable_t pmd_huge_pte; /* protected by page->ptl */
 		unsigned long _pt_pad_2;	/* mapping */
-		struct mm_struct *pt_mm;	/* x86 pgds only */
+		union {
+			struct mm_struct *pt_mm;	/* x86 pgds only */
+			atomic_t pt_frag_refcount;	/* powerpc */
+		};
 #if ALLOC_SPLIT_PTLOCKS
 		spinlock_t *ptl;
 #else
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment