Commit d9101bfa authored by Aneesh Kumar K.V, committed by Michael Ellerman

powerpc/mm/mce: Keep irqs disabled during lockless page table walk

__find_linux_pte() returns a page table entry pointer after walking
the page table without holding locks. To make it safe against a THP
split and/or collapse, we disable interrupts around the lockless page
table walk. However we need to keep interrupts disabled as long as we
use the page table entry pointer that is returned.

Fix addr_to_pfn() to do that.

Fixes: ba41e1e1 ("powerpc/mce: Hookup derror (load/store) UE errors")
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
[mpe: Rearrange code slightly and tweak change log wording]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20190918145328.28602-1-aneesh.kumar@linux.ibm.com
parent 7c1bb6bb
@@ -29,7 +29,7 @@ unsigned long addr_to_pfn(struct pt_regs *regs, unsigned long addr)
{
pte_t *ptep;
unsigned int shift;
unsigned long flags;
unsigned long pfn, flags;
struct mm_struct *mm;
if (user_mode(regs))
@@ -39,18 +39,22 @@ unsigned long addr_to_pfn(struct pt_regs *regs, unsigned long addr)
local_irq_save(flags);
ptep = __find_linux_pte(mm->pgd, addr, NULL, &shift);
local_irq_restore(flags);
if (!ptep || pte_special(*ptep))
return ULONG_MAX;
if (!ptep || pte_special(*ptep)) {
pfn = ULONG_MAX;
goto out;
}
if (shift > PAGE_SHIFT) {
if (shift <= PAGE_SHIFT)
pfn = pte_pfn(*ptep);
else {
unsigned long rpnmask = (1ul << shift) - PAGE_SIZE;
return pte_pfn(__pte(pte_val(*ptep) | (addr & rpnmask)));
pfn = pte_pfn(__pte(pte_val(*ptep) | (addr & rpnmask)));
}
return pte_pfn(*ptep);
out:
local_irq_restore(flags);
return pfn;
}
/* flush SLBs and reload */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment