Commit 438e69b5 authored by Aneesh Kumar K.V, committed by Michael Ellerman

powerpc/mm/radix: Skip ptesync in pte update helpers

We already do a ptesync at the start of the TLB flush, and a pte update is always followed by a TLB flush. Hence we can skip the ptesync in the pte update helpers.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Tested-by: Michael Neuling <mikey@neuling.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent f4894b80
@@ -144,13 +144,11 @@ static inline unsigned long radix__pte_update(struct mm_struct *mm,
 		 * new value of pte
 		 */
 		new_pte = (old_pte | set) & ~clr;
-		asm volatile("ptesync" : : : "memory");
 		radix__flush_tlb_pte_p9_dd1(old_pte, mm, addr);
 		if (new_pte)
 			__radix_pte_update(ptep, 0, new_pte);
 	} else
 		old_pte = __radix_pte_update(ptep, clr, set);
-	asm volatile("ptesync" : : : "memory");
 	if (!huge)
 		assert_pte_locked(mm, addr);
@@ -195,7 +193,6 @@ static inline void radix__ptep_set_access_flags(struct mm_struct *mm,
 	unsigned long old_pte, new_pte;
 	old_pte = __radix_pte_update(ptep, ~0, 0);
-	asm volatile("ptesync" : : : "memory");
 	/*
 	 * new value of pte
 	 */
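
For readers following the ordering argument in the commit message, the following is a minimal illustrative sketch (hypothetical sketch_* helpers, not code from this commit) of why the ptesync can be dropped from the pte update helpers: the radix TLB flush path itself begins with a ptesync, and every pte update on this path is followed by such a flush.

/*
 * Illustrative sketch only, assuming the ordering described in the
 * commit message.  The flush routine's leading ptesync orders any
 * prior PTE store before the invalidation, so the update helper no
 * longer needs its own ptesync.
 */
static inline void sketch_radix_tlb_flush(void)
{
	/* Leading ptesync: earlier PTE stores become visible before the invalidate. */
	asm volatile("ptesync" : : : "memory");
	/* ... tlbie/tlbiel and the closing synchronisation would follow here ... */
}

static inline void sketch_pte_update_then_flush(unsigned long *ptep,
						unsigned long new_pte)
{
	*ptep = new_pte;	/* simplified PTE store; the real helper uses an atomic update */
	/* No ptesync here any more: the flush below starts with one. */
	sketch_radix_tlb_flush();
}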