Commit c6a3c495 authored by Aneesh Kumar K.V, committed by Michael Ellerman

powerpc/mm: Add helper for converting pte bit to hpte bits

Instead of open coding it in multiple code paths, export the helper
and add more documentation. Also make sure we don't make assumptions
about pte bit positions.
Acked-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent a43c0eb8
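
To see what the diffs below consolidate, here is a minimal standalone sketch of the conversion logic, mirroring the helper this commit introduces. The flag values are illustrative placeholders only, not the real kernel definitions, which depend on the configured page-table layout:

```c
/* Illustrative flag values only -- the real definitions live in the
 * powerpc headers and vary with the page-table layout. */
#define _PAGE_USER	0x002UL
#define _PAGE_RW	0x004UL
#define _PAGE_DIRTY	0x080UL
#define _PAGE_EXEC	0x200UL

#define HPTE_R_N	0x004UL	/* no-execute */
#define HPTE_R_M	0x010UL	/* memory coherence */
#define HPTE_R_C	0x080UL	/* changed (dirty) */

/* Mirrors the helper introduced by this commit. */
unsigned long htab_convert_pte_flags(unsigned long pteflags)
{
	unsigned long rflags = 0;

	/* _PAGE_EXEC -> NOEXEC: the hpte bit has inverted sense */
	if ((pteflags & _PAGE_EXEC) == 0)
		rflags |= HPTE_R_N;
	/* PP bits: user pages get 0x2, read-only user pages 0x3;
	 * kernel pages keep PP 00 */
	if (pteflags & _PAGE_USER) {
		rflags |= 0x2;
		if (!((pteflags & _PAGE_RW) && (pteflags & _PAGE_DIRTY)))
			rflags |= 0x1;
	}
	/* Always add the "C" bit and memory coherence */
	return rflags | HPTE_R_C | HPTE_R_M;
}
```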
```diff
@@ -236,6 +236,7 @@ extern unsigned long pmd_hugepage_update(struct mm_struct *mm,
 					 pmd_t *pmdp,
 					 unsigned long clr,
 					 unsigned long set);
+extern unsigned long htab_convert_pte_flags(unsigned long pteflags);
 /* Atomic PTE updates */
 static inline unsigned long pte_update(struct mm_struct *mm,
 				       unsigned long addr,
```
```diff
@@ -53,18 +53,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
 	 * PP bits. _PAGE_USER is already PP bit 0x2, so we only
 	 * need to add in 0x1 if it's a read-only user page
 	 */
-	rflags = new_pte & _PAGE_USER;
-	if ((new_pte & _PAGE_USER) && !((new_pte & _PAGE_RW) &&
-					(new_pte & _PAGE_DIRTY)))
-		rflags |= 0x1;
-	/*
-	 * _PAGE_EXEC -> HW_NO_EXEC since it's inverted
-	 */
-	rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);
-	/*
-	 * Always add C and Memory coherence bit
-	 */
-	rflags |= HPTE_R_C | HPTE_R_M;
+	rflags = htab_convert_pte_flags(new_pte);
 	/*
 	 * Add in WIMG bits
 	 */
```
```diff
@@ -85,22 +85,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
 	 * Handle the subpage protection bits
 	 */
 	subpg_pte = new_pte & ~subpg_prot;
-	/*
-	 * PP bits. _PAGE_USER is already PP bit 0x2, so we only
-	 * need to add in 0x1 if it's a read-only user page
-	 */
-	rflags = subpg_pte & _PAGE_USER;
-	if ((subpg_pte & _PAGE_USER) && !((subpg_pte & _PAGE_RW) &&
-					(subpg_pte & _PAGE_DIRTY)))
-		rflags |= 0x1;
-	/*
-	 * _PAGE_EXEC -> HW_NO_EXEC since it's inverted
-	 */
-	rflags |= ((subpg_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);
-	/*
-	 * Always add C and Memory coherence bit
-	 */
-	rflags |= HPTE_R_C | HPTE_R_M;
+	rflags = htab_convert_pte_flags(subpg_pte);
 	/*
 	 * Add in WIMG bits
 	 */
```
```diff
@@ -271,22 +256,8 @@ int __hash_page_64K(unsigned long ea, unsigned long access,
 		new_pte |= _PAGE_DIRTY;
 	} while (old_pte != __cmpxchg_u64((unsigned long *)ptep,
 					  old_pte, new_pte));
-	/*
-	 * PP bits. _PAGE_USER is already PP bit 0x2, so we only
-	 * need to add in 0x1 if it's a read-only user page
-	 */
-	rflags = new_pte & _PAGE_USER;
-	if ((new_pte & _PAGE_USER) && !((new_pte & _PAGE_RW) &&
-					(new_pte & _PAGE_DIRTY)))
-		rflags |= 0x1;
-	/*
-	 * _PAGE_EXEC -> HW_NO_EXEC since it's inverted
-	 */
-	rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);
-	/*
-	 * Always add C and Memory coherence bit
-	 */
-	rflags |= HPTE_R_C | HPTE_R_M;
+
+	rflags = htab_convert_pte_flags(new_pte);
 	/*
 	 * Add in WIMG bits
 	 */
```
```diff
@@ -159,20 +159,26 @@ static struct mmu_psize_def mmu_psize_defaults_gp[] = {
 	},
 };

-static unsigned long htab_convert_pte_flags(unsigned long pteflags)
+unsigned long htab_convert_pte_flags(unsigned long pteflags)
 {
-	unsigned long rflags = pteflags & 0x1fa;
+	unsigned long rflags = 0;

 	/* _PAGE_EXEC -> NOEXEC */
 	if ((pteflags & _PAGE_EXEC) == 0)
 		rflags |= HPTE_R_N;
-
-	/* PP bits. PAGE_USER is already PP bit 0x2, so we only
-	 * need to add in 0x1 if it's a read-only user page
+	/*
+	 * PP bits:
+	 * Linux uses SLB key 0 for kernel and key 1 for user.
+	 * Kernel areas are mapped with PP bits 00,
+	 * and there is no kernel RO (_PAGE_KERNEL_RO).
+	 * User areas are mapped with PP 0x2, and read-only
+	 * user areas with PP 0x3.
 	 */
-	if ((pteflags & _PAGE_USER) && !((pteflags & _PAGE_RW) &&
-					 (pteflags & _PAGE_DIRTY)))
-		rflags |= 1;
+	if (pteflags & _PAGE_USER) {
+		rflags |= 0x2;
+		if (!((pteflags & _PAGE_RW) && (pteflags & _PAGE_DIRTY)))
+			rflags |= 0x1;
+	}
 	/*
 	 * Always add "C" bit for perf. Memory coherence is always enabled
 	 */
```
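
The rewritten comment ties the PP encoding to the SLB key split: kernel mappings (key 0) keep PP 00, user mappings (key 1) get 0x2 for read/write or 0x3 for read-only. A few checks against the sketch shown after the commit header, reusing its illustrative definitions, pin the mapping down:

```c
#include <assert.h>

/* Assumes the sketch shown after the commit header is in scope. */
int main(void)
{
	/* Kernel page: PP bits stay 00 */
	assert((htab_convert_pte_flags(_PAGE_RW | _PAGE_DIRTY) & 0x3) == 0x0);
	/* Writable and dirty user page: PP = 0x2 */
	assert((htab_convert_pte_flags(_PAGE_USER | _PAGE_RW |
				       _PAGE_DIRTY) & 0x3) == 0x2);
	/* Writable but clean user page: demoted to read-only, PP = 0x3 */
	assert((htab_convert_pte_flags(_PAGE_USER | _PAGE_RW) & 0x3) == 0x3);
	/* Plain read-only user page: PP = 0x3 */
	assert((htab_convert_pte_flags(_PAGE_USER) & 0x3) == 0x3);
	/* Non-executable mappings always pick up HPTE_R_N */
	assert(htab_convert_pte_flags(_PAGE_USER) & HPTE_R_N);
	return 0;
}
```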
```diff
@@ -54,18 +54,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
 		new_pmd |= _PAGE_DIRTY;
 	} while (old_pmd != __cmpxchg_u64((unsigned long *)pmdp,
 					  old_pmd, new_pmd));
-	/*
-	 * PP bits. _PAGE_USER is already PP bit 0x2, so we only
-	 * need to add in 0x1 if it's a read-only user page
-	 */
-	rflags = new_pmd & _PAGE_USER;
-	if ((new_pmd & _PAGE_USER) && !((new_pmd & _PAGE_RW) &&
-					(new_pmd & _PAGE_DIRTY)))
-		rflags |= 0x1;
-	/*
-	 * _PAGE_EXEC -> HW_NO_EXEC since it's inverted
-	 */
-	rflags |= ((new_pmd & _PAGE_EXEC) ? 0 : HPTE_R_N);
+	rflags = htab_convert_pte_flags(new_pmd);
 #if 0
 	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) {
```
```diff
@@ -59,10 +59,8 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
 		new_pte |= _PAGE_DIRTY;
 	} while(old_pte != __cmpxchg_u64((unsigned long *)ptep,
 					 old_pte, new_pte));
+	rflags = htab_convert_pte_flags(new_pte);

-	rflags = 0x2 | (!(new_pte & _PAGE_RW));
-	/* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */
-	rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);
 	sz = ((1UL) << shift);
 	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
 		/* No CPU has hugepages but lacks no execute, so we
```
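
This hugepage hunk is the one place where the switch is more than mechanical: the old open-coded version derived the PP bits from _PAGE_RW alone, ignoring _PAGE_USER and _PAGE_DIRTY, while the shared helper also demotes a writable-but-clean user page to read-only so the first store faults and the kernel can set _PAGE_DIRTY, matching the 4K path. A sketch of the difference, again with the illustrative flags from the first example:

```c
/* Old open-coded hugepage logic: 0x2 if writable, else 0x3,
 * regardless of _PAGE_USER or _PAGE_DIRTY. */
unsigned long old_huge_pp(unsigned long pte)
{
	return 0x2 | (!(pte & _PAGE_RW));
}

/* With htab_convert_pte_flags(), the same writable-but-clean user
 * page now maps read-only until it is marked dirty:
 *
 *   old_huge_pp(_PAGE_USER | _PAGE_RW)                    == 0x2
 *   htab_convert_pte_flags(_PAGE_USER | _PAGE_RW) & 0x3   == 0x3
 */
```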