Commit 42a27223 authored by Christophe Leroy, committed by Michael Ellerman

powerpc/nohash: Refactor pte_update()

pte_update() is similar on nohash/32 and nohash/64.

Take the nohash/32 version, which also works on nohash/64, and add the debug
call to assert_pte_locked(), which previously existed only in the nohash/64 version.
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/e01cb630cad42f645915ce7702d23985241b71fc.1695659959.git.christophe.leroy@csgroup.eu
parent 0f4027ea
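For context, pte_update() is the primitive that higher-level PTE helpers are built on: callers pass a mask of bits to clear and a mask of bits to set, and use the returned old value to learn what the entry contained. A minimal sketch of a typical caller, loosely modelled on __ptep_test_and_clear_young() from the 32-bit header touched below (an illustration, not verbatim kernel source; the function name is invented):

static inline int example_test_and_clear_young(struct mm_struct *mm,
					       unsigned long addr, pte_t *ptep)
{
	unsigned long old;

	/* Clear _PAGE_ACCESSED, set no bits; the final 0 = not a huge page. */
	old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);

	/* The returned old value reports whether the page was referenced. */
	return (old & _PAGE_ACCESSED) != 0;
}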
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -9,8 +9,6 @@
 #include <linux/threads.h>
 #include <asm/mmu.h>			/* For sub-arch specific PPC_PIN_SIZE */
 
-extern int icache_44x_need_flush;
-
 #endif /* __ASSEMBLY__ */
 
 #define PTE_INDEX_SIZE	PTE_SHIFT
@@ -203,37 +201,6 @@ static inline void pmd_clear(pmd_t *pmdp)
 	*pmdp = __pmd(0);
 }
 
-/*
- * PTE updates. This function is called whenever an existing
- * valid PTE is updated. This does -not- include set_pte_at()
- * which nowadays only sets a new PTE.
- *
- * Depending on the type of MMU, we may need to use atomic updates
- * and the PTE may be either 32 or 64 bit wide. In the later case,
- * when using atomic updates, only the low part of the PTE is
- * accessed atomically.
- *
- * In addition, on 44x, we also maintain a global flag indicating
- * that an executable user mapping was modified, which is needed
- * to properly flush the virtually tagged instruction cache of
- * those implementations.
- */
-#ifndef pte_update
-static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
-				     unsigned long clr, unsigned long set, int huge)
-{
-	pte_basic_t old = pte_val(*p);
-	pte_basic_t new = (old & ~(pte_basic_t)clr) | set;
-
-	*p = __pte(new);
-
-	if (IS_ENABLED(CONFIG_44x) && (old & _PAGE_USER) && (old & _PAGE_EXEC))
-		icache_44x_need_flush = 1;
-
-	return old;
-}
-#endif
-
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
 static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
 					      unsigned long addr, pte_t *ptep)
--- a/arch/powerpc/include/asm/nohash/64/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
@@ -171,23 +171,6 @@ static inline void p4d_set(p4d_t *p4dp, unsigned long val)
 	*p4dp = __p4d(val);
 }
 
-/* Atomic PTE updates */
-static inline unsigned long pte_update(struct mm_struct *mm,
-				       unsigned long addr,
-				       pte_t *ptep, unsigned long clr,
-				       unsigned long set,
-				       int huge)
-{
-	unsigned long old = pte_val(*ptep);
-	*ptep = __pte((old & ~clr) | set);
-
-	/* huge pages use the old page table lock */
-	if (!huge)
-		assert_pte_locked(mm, addr);
-
-	return old;
-}
-
 static inline int pte_young(pte_t pte)
 {
 	return pte_val(pte) & _PAGE_ACCESSED;
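Both removed implementations perform the same read-modify-write, new = (old & ~clr) | set; they differ only in the 44x icache bookkeeping (nohash/32) and the lock assertion (nohash/64), which the unified version below combines. A self-contained illustration of the mask algebra, with hypothetical bit values chosen purely for demonstration:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical PTE bit assignments, for illustration only. */
#define EX_PAGE_PRESENT		0x001ULL
#define EX_PAGE_RW		0x002ULL
#define EX_PAGE_ACCESSED	0x100ULL

int main(void)
{
	uint64_t old = EX_PAGE_PRESENT | EX_PAGE_RW | EX_PAGE_ACCESSED;
	uint64_t clr = EX_PAGE_RW | EX_PAGE_ACCESSED;	/* bits to drop */
	uint64_t set = 0;				/* bits to add */
	uint64_t new = (old & ~clr) | set;

	/* Prints old=0x103 new=0x1: RW and ACCESSED cleared, PRESENT kept. */
	printf("old=%#llx new=%#llx\n",
	       (unsigned long long)old, (unsigned long long)new);
	return 0;
}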
--- a/arch/powerpc/include/asm/nohash/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/pgtable.h
@@ -2,6 +2,11 @@
 #ifndef _ASM_POWERPC_NOHASH_PGTABLE_H
 #define _ASM_POWERPC_NOHASH_PGTABLE_H
 
+#ifndef __ASSEMBLY__
+static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
+				     unsigned long clr, unsigned long set, int huge);
+#endif
+
 #if defined(CONFIG_PPC64)
 #include <asm/nohash/64/pgtable.h>
 #else
@@ -18,6 +23,43 @@
 
 #ifndef __ASSEMBLY__
 
+extern int icache_44x_need_flush;
+
+/*
+ * PTE updates. This function is called whenever an existing
+ * valid PTE is updated. This does -not- include set_pte_at()
+ * which nowadays only sets a new PTE.
+ *
+ * Depending on the type of MMU, we may need to use atomic updates
+ * and the PTE may be either 32 or 64 bit wide. In the later case,
+ * when using atomic updates, only the low part of the PTE is
+ * accessed atomically.
+ *
+ * In addition, on 44x, we also maintain a global flag indicating
+ * that an executable user mapping was modified, which is needed
+ * to properly flush the virtually tagged instruction cache of
+ * those implementations.
+ */
+#ifndef pte_update
+static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
+				     unsigned long clr, unsigned long set, int huge)
+{
+	pte_basic_t old = pte_val(*p);
+	pte_basic_t new = (old & ~(pte_basic_t)clr) | set;
+
+	*p = __pte(new);
+
+	if (IS_ENABLED(CONFIG_44x) && (old & _PAGE_USER) && (old & _PAGE_EXEC))
+		icache_44x_need_flush = 1;
+
+	/* huge pages use the old page table lock */
+	if (!huge)
+		assert_pte_locked(mm, addr);
+
+	return old;
+}
+#endif
+
 /* Generic accessors to PTE bits */
 #ifndef pte_write
 static inline int pte_write(pte_t pte)
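The one behavioural change for nohash/32 is that pte_update() now performs the assert_pte_locked() debug check: for non-huge pages, the caller is expected to hold the PTE lock. A hedged sketch of that calling pattern using the generic pte_offset_map_lock()/pte_unmap_unlock() API (example_clear_accessed() is an invented name and the clr/set masks are chosen only for illustration):

static void example_clear_accessed(struct mm_struct *mm, pmd_t *pmd,
				   unsigned long addr)
{
	spinlock_t *ptl;
	pte_t *ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);

	if (!ptep)
		return;

	/* The PTE lock is held here, so the assert_pte_locked(mm, addr)
	 * check inside pte_update() passes for huge == 0. */
	pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);

	pte_unmap_unlock(ptep, ptl);
}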