Commit 2f92447f authored by Aneesh Kumar K.V, committed by Michael Ellerman

powerpc/book3s64/hash: Use the pte_t address from the caller

Don't fetch the pte value using a lockless page table walk. Instead, use the value from the
caller. hash_preload() is called with the PTL held, so it is safe to use the
pte_t address directly.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20200505071729.54912-6-aneesh.kumar@linux.ibm.com
parent 7900757c
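
To make the shape of the change concrete before the diff, here is a simplified C sketch of the calling pattern the commit moves to. It is not the literal kernel code: the surrounding filtering in update_mmu_cache() is elided, the trap/is_exec values are illustrative placeholders, and the usual kernel headers and types are assumed. The point it shows is that update_mmu_cache() already holds the PTL and has the PTE pointer, so it hands ptep to hash_preload() instead of hash_preload() re-walking the page table with find_current_mm_pte() under local_irq_save().

/* Simplified sketch only; not the literal kernel code. */

static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea,
			 bool is_exec, unsigned long trap)
{
	/*
	 * The caller holds the PTL, so *ptep cannot change underneath us.
	 * No local_irq_save()/find_current_mm_pte() walk is needed any more;
	 * the pte value can be read directly.
	 */
	unsigned long pte = pte_val(*ptep);

	/* ... use "pte" to compute the access flags and insert the HPTE ... */
}

void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *ptep)
{
	/* Illustrative placeholders; the real values come from pt_regs. */
	unsigned long trap = 0;
	bool is_exec = false;

	/*
	 * Called with the PTL held and the PTE pointer in hand, so pass
	 * ptep straight down rather than letting hash_preload() refetch
	 * the entry via a lockless walk.
	 */
	hash_preload(vma->vm_mm, ptep, address, is_exec, trap);
}
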
@@ -1546,14 +1546,11 @@ static bool should_hash_preload(struct mm_struct *mm, unsigned long ea)
 }
 #endif
 
-static void hash_preload(struct mm_struct *mm, unsigned long ea,
+static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea,
 			 bool is_exec, unsigned long trap)
 {
-	int hugepage_shift;
 	unsigned long vsid;
 	pgd_t *pgdir;
-	pte_t *ptep;
-	unsigned long flags;
 	int rc, ssize, update_flags = 0;
 	unsigned long access = _PAGE_PRESENT | _PAGE_READ | (is_exec ? _PAGE_EXEC : 0);
 
@@ -1575,30 +1572,18 @@ static void hash_preload(struct mm_struct *mm, unsigned long ea,
 	vsid = get_user_vsid(&mm->context, ea, ssize);
 	if (!vsid)
 		return;
-	/*
-	 * Hash doesn't like irqs. Walking linux page table with irq disabled
-	 * saves us from holding multiple locks.
-	 */
-	local_irq_save(flags);
-
-	/*
-	 * THP pages use update_mmu_cache_pmd. We don't do
-	 * hash preload there. Hence can ignore THP here
-	 */
-	ptep = find_current_mm_pte(pgdir, ea, NULL, &hugepage_shift);
-	if (!ptep)
-		goto out_exit;
 
-	WARN_ON(hugepage_shift);
 #ifdef CONFIG_PPC_64K_PAGES
 	/* If either H_PAGE_4K_PFN or cache inhibited is set (and we are on
 	 * a 64K kernel), then we don't preload, hash_page() will take
 	 * care of it once we actually try to access the page.
 	 * That way we don't have to duplicate all of the logic for segment
 	 * page size demotion here
+	 * Called with PTL held, hence can be sure the value won't change in
+	 * between.
 	 */
 	if ((pte_val(*ptep) & H_PAGE_4K_PFN) || pte_ci(*ptep))
-		goto out_exit;
+		return;
 #endif /* CONFIG_PPC_64K_PAGES */
 
 	/* Is that local to this CPU ? */
@@ -1623,8 +1608,6 @@ static void hash_preload(struct mm_struct *mm, unsigned long ea,
 			   mm_ctx_user_psize(&mm->context),
 			   mm_ctx_user_psize(&mm->context),
 			   pte_val(*ptep));
-out_exit:
-	local_irq_restore(flags);
 }
 
 /*
@@ -1675,7 +1658,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 		return;
 	}
 
-	hash_preload(vma->vm_mm, address, is_exec, trap);
+	hash_preload(vma->vm_mm, ptep, address, is_exec, trap);
 }
 
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM