Commit d0ebc08f authored by Andrew Morton's avatar Andrew Morton Committed by Linus Torvalds

[PATCH] ppc64: hugepage bugfix

From: David Gibson <david@gibson.dropbear.id.au>

Found this again while looking at hugepage extensions.  Haven't actually had
it bite yet - the race is small and the other bug will never be triggered in
32-bit processes, and the function is rarely called on 64-bit processes.

This patch fixes two bugs in the (same part of the) PPC64 hugepage code.
First the method we were using to free stale PTE pages was not safe with some
recent changes (race condition).  BenH has fixed this to work in the new way.
Second, we were not checking for a valid PGD entry before dereferencing the
PMD page when scanning for stale PTE page pointers.
parent b4e0dd09
......@@ -247,6 +247,7 @@ static int open_32bit_htlbpage_range(struct mm_struct *mm)
{
struct vm_area_struct *vma;
unsigned long addr;
struct mmu_gather *tlb;
if (mm->context.low_hpages)
return 0; /* The window is already open */
......@@ -262,32 +263,39 @@ static int open_32bit_htlbpage_range(struct mm_struct *mm)
/* Clean up any leftover PTE pages in the region */
spin_lock(&mm->page_table_lock);
tlb = tlb_gather_mmu(mm, 0);
for (addr = TASK_HPAGE_BASE_32; addr < TASK_HPAGE_END_32;
addr += PMD_SIZE) {
pgd_t *pgd = pgd_offset(mm, addr);
pmd_t *pmd = pmd_offset(pgd, addr);
if (! pmd_none(*pmd)) {
struct page *page = pmd_page(*pmd);
pte_t *pte = (pte_t *)pmd_page_kernel(*pmd);
pmd_t *pmd;
struct page *page;
pte_t *pte;
int i;
/* No VMAs, so there should be no PTEs, check
* just in case. */
if (pgd_none(*pgd))
continue;
pmd = pmd_offset(pgd, addr);
if (!pmd || pmd_none(*pmd))
continue;
if (pmd_bad(*pmd)) {
pmd_ERROR(*pmd);
pmd_clear(pmd);
continue;
}
pte = (pte_t *)pmd_page_kernel(*pmd);
/* No VMAs, so there should be no PTEs, check just in case. */
for (i = 0; i < PTRS_PER_PTE; i++) {
BUG_ON(! pte_none(*pte));
BUG_ON(!pte_none(*pte));
pte++;
}
page = pmd_page(*pmd);
pmd_clear(pmd);
pgtable_remove_rmap(page);
pte_free(page);
}
pte_free_tlb(tlb, page);
}
tlb_finish_mmu(tlb, TASK_HPAGE_BASE_32, TASK_HPAGE_END_32);
spin_unlock(&mm->page_table_lock);
/* FIXME: do we need to scan for PTEs too? */
mm->context.low_hpages = 1;
/* the context change must make it to memory before the slbia,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment