Commit 9b604722 authored by Mark Rutland, committed by Catalin Marinas

arm64: mm: avoid redundant READ_ONCE(*ptep)

In set_pte_at(), we read the old pte value so that it can be passed into
checks for racy hw updates. These checks are only performed for
CONFIG_DEBUG_VM, and the value is not used otherwise.

Since we read the pte value with READ_ONCE(), the compiler cannot elide
the redundant read for !CONFIG_DEBUG_VM kernels.
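
For context, READ_ONCE() boils down to a load through a volatile-qualified
pointer, so the compiler has to emit the access even when the result is never
used. A simplified sketch of that effect follows; the macro and the
store_value() helper here are illustrative stand-ins, not the kernel's actual
definitions:

/*
 * Stand-in for the kernel's READ_ONCE(); the volatile access is what
 * prevents the compiler from eliding the load.
 */
#define READ_ONCE(x)	(*(const volatile typeof(x) *)&(x))

static inline void store_value(unsigned long *ptr, unsigned long val)
{
	unsigned long old = READ_ONCE(*ptr);	/* load is always emitted... */

	(void)old;				/* ...even though the value is unused */
	*ptr = val;
}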

Let's ameliorate matters by moving the read and the checks into a
helper, __check_racy_pte_update(), which only performs the read when the
value will be used. This also allows us to reformat the conditions for
clarity.
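
The opening of the helper (quoted from the diff below, with explanatory
comments added) shows why the read disappears from !CONFIG_DEBUG_VM builds:

static inline void __check_racy_pte_update(struct mm_struct *mm, pte_t *ptep,
					   pte_t pte)
{
	pte_t old_pte;

	/*
	 * IS_ENABLED() evaluates to a compile-time constant, so with
	 * CONFIG_DEBUG_VM disabled this is an unconditional return and
	 * everything below it is dead code.
	 */
	if (!IS_ENABLED(CONFIG_DEBUG_VM))
		return;

	/* The READ_ONCE() is now only compiled in when the value is used. */
	old_pte = READ_ONCE(*ptep);

	/* The racy access/dirty warnings on old_pte follow; see the diff below. */
}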

Acked-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent 4745224b
arch/arm64/include/asm/pgtable.h

@@ -246,29 +246,42 @@ extern void __sync_icache_dcache(pte_t pteval);
  *
  *   PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY)
  */
-static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
-			      pte_t *ptep, pte_t pte)
+
+static inline void __check_racy_pte_update(struct mm_struct *mm, pte_t *ptep,
+					   pte_t pte)
 {
 	pte_t old_pte;
 
-	if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
-		__sync_icache_dcache(pte);
+	if (!IS_ENABLED(CONFIG_DEBUG_VM))
+		return;
+
+	old_pte = READ_ONCE(*ptep);
+
+	if (!pte_valid(old_pte) || !pte_valid(pte))
+		return;
+	if (mm != current->active_mm && atomic_read(&mm->mm_users) <= 1)
+		return;
 
 	/*
-	 * If the existing pte is valid, check for potential race with
-	 * hardware updates of the pte (ptep_set_access_flags safely changes
-	 * valid ptes without going through an invalid entry).
+	 * Check for potential race with hardware updates of the pte
+	 * (ptep_set_access_flags safely changes valid ptes without going
+	 * through an invalid entry).
 	 */
-	old_pte = READ_ONCE(*ptep);
-	if (IS_ENABLED(CONFIG_DEBUG_VM) && pte_valid(old_pte) && pte_valid(pte) &&
-	   (mm == current->active_mm || atomic_read(&mm->mm_users) > 1)) {
-		VM_WARN_ONCE(!pte_young(pte),
-			     "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
-			     __func__, pte_val(old_pte), pte_val(pte));
-		VM_WARN_ONCE(pte_write(old_pte) && !pte_dirty(pte),
-			     "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
-			     __func__, pte_val(old_pte), pte_val(pte));
-	}
+	VM_WARN_ONCE(!pte_young(pte),
+		     "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
+		     __func__, pte_val(old_pte), pte_val(pte));
+	VM_WARN_ONCE(pte_write(old_pte) && !pte_dirty(pte),
+		     "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
+		     __func__, pte_val(old_pte), pte_val(pte));
+}
+
+static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+			      pte_t *ptep, pte_t pte)
+{
+	if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
+		__sync_icache_dcache(pte);
+
+	__check_racy_pte_update(mm, ptep, pte);
 
 	set_pte(ptep, pte);
 }