Commit 3910a7f4 authored by Michael Ellerman

powerpc/mm: Add pte_xchg() helper

We have five locations in the 64-bit hash MMU code that do a cmpxchg() of
a PTE. Currently doing it inline is OK, but in a future patch we will be
converting the PTEs to __be64 in some configs. In that case we will need
casts at every cmpxchg() site in order to keep sparse happy.

So move the logic into a helper; this is a reasonably nice cleanup on
its own.
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 4bece39b
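
For context, a sketch of what this helper buys us once PTEs become __be64:
the endian-annotated types and the __force casts needed to keep sparse quiet
collapse into this one definition instead of being repeated at every call
site. The pte_raw() accessor and the exact casts below are assumptions about
the future patch, not part of this commit:

static inline bool pte_xchg(pte_t *ptep, pte_t old, pte_t new)
{
	/*
	 * Hypothetical __be64 variant: pte_raw() is assumed to return the
	 * stored big-endian value; __force tells sparse the conversion
	 * between the endian-annotated type and unsigned long is intended.
	 */
	__be64 prev;

	prev = (__force __be64)__cmpxchg_u64((unsigned long *)ptep,
					     (__force unsigned long)pte_raw(old),
					     (__force unsigned long)pte_raw(new));

	return pte_raw(old) == prev;
}

Callers then keep the same retry-loop shape regardless of the PTE
representation, as the hunks below show.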
arch/powerpc/include/asm/kvm_book3s_64.h
@@ -319,11 +319,8 @@ static inline pte_t kvmppc_read_update_linux_pte(pte_t *ptep, int writing)
 		if (writing && pte_write(old_pte))
 			new_pte = pte_mkdirty(new_pte);
 
-		if (pte_val(old_pte) == __cmpxchg_u64((unsigned long *)ptep,
-						      pte_val(old_pte),
-						      pte_val(new_pte))) {
+		if (pte_xchg(ptep, old_pte, new_pte))
 			break;
-		}
 	}
 	return new_pte;
 }
arch/powerpc/include/asm/pgtable-types.h
@@ -54,4 +54,16 @@ typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
 #else
 typedef struct { pte_t pte; } real_pte_t;
 #endif
+
+#ifdef CONFIG_PPC_STD_MMU_64
+#include <asm/cmpxchg.h>
+
+static inline bool pte_xchg(pte_t *ptep, pte_t old, pte_t new)
+{
+	unsigned long *p = (unsigned long *)ptep;
+
+	return pte_val(old) == __cmpxchg_u64(p, pte_val(old), pte_val(new));
+}
+#endif
+
 #endif /* _ASM_POWERPC_PGTABLE_TYPES_H */
arch/powerpc/mm/hash64_4k.c
@@ -47,8 +47,8 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
 		new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED;
 		if (access & _PAGE_RW)
 			new_pte |= _PAGE_DIRTY;
-	} while (old_pte != __cmpxchg_u64((unsigned long *)ptep,
-					  old_pte, new_pte));
+	} while (!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));
+
 	/*
 	 * PP bits. _PAGE_USER is already PP bit 0x2, so we only
 	 * need to add in 0x1 if it's a read-only user page
arch/powerpc/mm/hash64_64k.c
@@ -79,8 +79,8 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
 		new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED | _PAGE_COMBO;
 		if (access & _PAGE_RW)
 			new_pte |= _PAGE_DIRTY;
-	} while (old_pte != __cmpxchg_u64((unsigned long *)ptep,
-					  old_pte, new_pte));
+	} while (!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));
+
 	/*
 	 * Handle the subpage protection bits
 	 */
@@ -254,8 +254,7 @@ int __hash_page_64K(unsigned long ea, unsigned long access,
 		new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED;
 		if (access & _PAGE_RW)
 			new_pte |= _PAGE_DIRTY;
-	} while (old_pte != __cmpxchg_u64((unsigned long *)ptep,
-					  old_pte, new_pte));
+	} while (!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));
 
 	rflags = htab_convert_pte_flags(new_pte);
arch/powerpc/mm/hugetlbpage-hash64.c
@@ -57,8 +57,8 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
 		new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED;
 		if (access & _PAGE_RW)
 			new_pte |= _PAGE_DIRTY;
-	} while(old_pte != __cmpxchg_u64((unsigned long *)ptep,
-					 old_pte, new_pte));
+	} while(!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));
+
 	rflags = htab_convert_pte_flags(new_pte);
 	sz = ((1UL) << shift);