Commit d20506d4 authored by Christophe Leroy, committed by Michael Ellerman

powerpc/nohash: Add _PAGE_WRITE to supplement _PAGE_RW

In several places, _PAGE_RW maps to write permission and doesn't
always imply read. To make it clearer, do as book3s/64 in
commit c7d54842 ("powerpc/mm: Use _PAGE_READ to indicate
Read access") and use _PAGE_WRITE when more relevant.

For the time being _PAGE_WRITE is equivalent to _PAGE_RW but that
will change when _PAGE_READ gets added in following patches.
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/1f79b88db54d030ada776dc9845e0e88345bfc28.1695659959.git.christophe.leroy@csgroup.eu
parent 58f53462
...@@ -49,6 +49,8 @@ ...@@ -49,6 +49,8 @@
#define _PAGE_EXEC 0x200 /* hardware: EX permission */ #define _PAGE_EXEC 0x200 /* hardware: EX permission */
#define _PAGE_ACCESSED 0x400 /* software: R: page referenced */ #define _PAGE_ACCESSED 0x400 /* software: R: page referenced */
#define _PAGE_WRITE _PAGE_RW
/* No page size encoding in the linux PTE */ /* No page size encoding in the linux PTE */
#define _PAGE_PSIZE 0 #define _PAGE_PSIZE 0
......
...@@ -75,6 +75,8 @@ ...@@ -75,6 +75,8 @@
#define _PAGE_NO_CACHE 0x00000400 /* H: I bit */ #define _PAGE_NO_CACHE 0x00000400 /* H: I bit */
#define _PAGE_WRITETHRU 0x00000800 /* H: W bit */ #define _PAGE_WRITETHRU 0x00000800 /* H: W bit */
#define _PAGE_WRITE _PAGE_RW
/* No page size encoding in the linux PTE */ /* No page size encoding in the linux PTE */
#define _PAGE_PSIZE 0 #define _PAGE_PSIZE 0
......
...@@ -31,6 +31,8 @@ ...@@ -31,6 +31,8 @@
#define _PAGE_WRITETHRU 0x00400 /* H: W bit */ #define _PAGE_WRITETHRU 0x00400 /* H: W bit */
#define _PAGE_SPECIAL 0x00800 /* S: Special page */ #define _PAGE_SPECIAL 0x00800 /* S: Special page */
#define _PAGE_WRITE _PAGE_RW
#define _PAGE_KERNEL_RO 0 #define _PAGE_KERNEL_RO 0
#define _PAGE_KERNEL_ROX _PAGE_EXEC #define _PAGE_KERNEL_ROX _PAGE_EXEC
#define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW) #define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW)
......
...@@ -149,7 +149,7 @@ static inline void p4d_set(p4d_t *p4dp, unsigned long val) ...@@ -149,7 +149,7 @@ static inline void p4d_set(p4d_t *p4dp, unsigned long val)
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep) unsigned long addr, pte_t *ptep)
{ {
pte_update(mm, addr, ptep, _PAGE_RW, 0, 1); pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 1);
} }
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
......
...@@ -84,7 +84,7 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, ...@@ -84,7 +84,7 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
pte_t *ptep) pte_t *ptep)
{ {
pte_update(mm, addr, ptep, _PAGE_RW, 0, 0); pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0);
} }
#endif #endif
#define __HAVE_ARCH_PTEP_SET_WRPROTECT #define __HAVE_ARCH_PTEP_SET_WRPROTECT
...@@ -122,6 +122,9 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma, ...@@ -122,6 +122,9 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
#ifndef pte_mkwrite_novma #ifndef pte_mkwrite_novma
static inline pte_t pte_mkwrite_novma(pte_t pte) static inline pte_t pte_mkwrite_novma(pte_t pte)
{ {
/*
* write implies read, hence set both
*/
return __pte(pte_val(pte) | _PAGE_RW); return __pte(pte_val(pte) | _PAGE_RW);
} }
#endif #endif
...@@ -139,7 +142,7 @@ static inline pte_t pte_mkyoung(pte_t pte) ...@@ -139,7 +142,7 @@ static inline pte_t pte_mkyoung(pte_t pte)
#ifndef pte_wrprotect #ifndef pte_wrprotect
static inline pte_t pte_wrprotect(pte_t pte) static inline pte_t pte_wrprotect(pte_t pte)
{ {
return __pte(pte_val(pte) & ~_PAGE_RW); return __pte(pte_val(pte) & ~_PAGE_WRITE);
} }
#endif #endif
...@@ -153,7 +156,7 @@ static inline pte_t pte_mkexec(pte_t pte) ...@@ -153,7 +156,7 @@ static inline pte_t pte_mkexec(pte_t pte)
#ifndef pte_write #ifndef pte_write
static inline int pte_write(pte_t pte) static inline int pte_write(pte_t pte)
{ {
return pte_val(pte) & _PAGE_RW; return pte_val(pte) & _PAGE_WRITE;
} }
#endif #endif
#ifndef pte_read #ifndef pte_read
......
...@@ -55,6 +55,8 @@ ...@@ -55,6 +55,8 @@
#define _PAGE_KERNEL_ROX (_PAGE_BAP_SR | _PAGE_BAP_SX) #define _PAGE_KERNEL_ROX (_PAGE_BAP_SR | _PAGE_BAP_SX)
#define _PAGE_USER (_PAGE_BAP_UR | _PAGE_BAP_SR) /* Can be read */ #define _PAGE_USER (_PAGE_BAP_UR | _PAGE_BAP_SR) /* Can be read */
#define _PAGE_WRITE _PAGE_RW
#define _PAGE_SPECIAL _PAGE_SW0 #define _PAGE_SPECIAL _PAGE_SW0
/* Base page size */ /* Base page size */
......
...@@ -316,9 +316,9 @@ _ASM_NOKPROBE_SYMBOL(\name\()_virt) ...@@ -316,9 +316,9 @@ _ASM_NOKPROBE_SYMBOL(\name\()_virt)
andc. r9, r9, r11 /* Check permission */ andc. r9, r9, r11 /* Check permission */
bne 5f bne 5f
rlwinm r9, r11, 1, _PAGE_RW /* dirty => rw */ rlwinm r9, r11, 1, _PAGE_WRITE /* dirty => w */
and r9, r9, r11 /* hwwrite = dirty & rw */ and r9, r9, r11 /* hwwrite = dirty & w */
rlwimi r11, r9, 0, _PAGE_RW /* replace rw by hwwrite */ rlwimi r11, r9, 0, _PAGE_WRITE /* replace w by hwwrite */
/* Create TLB tag. This is the faulting address plus a static /* Create TLB tag. This is the faulting address plus a static
* set of bits. These are size, valid, E, U0. * set of bits. These are size, valid, E, U0.
...@@ -400,9 +400,9 @@ _ASM_NOKPROBE_SYMBOL(\name\()_virt) ...@@ -400,9 +400,9 @@ _ASM_NOKPROBE_SYMBOL(\name\()_virt)
andc. r9, r9, r11 /* Check permission */ andc. r9, r9, r11 /* Check permission */
bne 5f bne 5f
rlwinm r9, r11, 1, _PAGE_RW /* dirty => rw */ rlwinm r9, r11, 1, _PAGE_WRITE /* dirty => w */
and r9, r9, r11 /* hwwrite = dirty & rw */ and r9, r9, r11 /* hwwrite = dirty & w */
rlwimi r11, r9, 0, _PAGE_RW /* replace rw by hwwrite */ rlwimi r11, r9, 0, _PAGE_WRITE /* replace w by hwwrite */
/* Create TLB tag. This is the faulting address plus a static /* Create TLB tag. This is the faulting address plus a static
* set of bits. These are size, valid, E, U0. * set of bits. These are size, valid, E, U0.
......
...@@ -342,7 +342,7 @@ interrupt_base: ...@@ -342,7 +342,7 @@ interrupt_base:
mtspr SPRN_MMUCR,r12 mtspr SPRN_MMUCR,r12
/* Mask of required permission bits. Note that while we /* Mask of required permission bits. Note that while we
* do copy ESR:ST to _PAGE_RW position as trying to write * do copy ESR:ST to _PAGE_WRITE position as trying to write
* to an RO page is pretty common, we don't do it with * to an RO page is pretty common, we don't do it with
* _PAGE_DIRTY. We could do it, but it's a fairly rare * _PAGE_DIRTY. We could do it, but it's a fairly rare
* event so I'd rather take the overhead when it happens * event so I'd rather take the overhead when it happens
...@@ -586,7 +586,7 @@ finish_tlb_load_44x: ...@@ -586,7 +586,7 @@ finish_tlb_load_44x:
4: mtspr SPRN_MMUCR,r12 /* Set MMUCR */ 4: mtspr SPRN_MMUCR,r12 /* Set MMUCR */
/* Mask of required permission bits. Note that while we /* Mask of required permission bits. Note that while we
* do copy ESR:ST to _PAGE_RW position as trying to write * do copy ESR:ST to _PAGE_WRITE position as trying to write
* to an RO page is pretty common, we don't do it with * to an RO page is pretty common, we don't do it with
* _PAGE_DIRTY. We could do it, but it's a fairly rare * _PAGE_DIRTY. We could do it, but it's a fairly rare
* event so I'd rather take the overhead when it happens * event so I'd rather take the overhead when it happens
......
...@@ -471,7 +471,7 @@ END_BTB_FLUSH_SECTION ...@@ -471,7 +471,7 @@ END_BTB_FLUSH_SECTION
4: 4:
/* Mask of required permission bits. Note that while we /* Mask of required permission bits. Note that while we
* do copy ESR:ST to _PAGE_RW position as trying to write * do copy ESR:ST to _PAGE_WRITE position as trying to write
* to an RO page is pretty common, we don't do it with * to an RO page is pretty common, we don't do it with
* _PAGE_DIRTY. We could do it, but it's a fairly rare * _PAGE_DIRTY. We could do it, but it's a fairly rare
* event so I'd rather take the overhead when it happens * event so I'd rather take the overhead when it happens
......
...@@ -117,7 +117,7 @@ static void settlbcam(int index, unsigned long virt, phys_addr_t phys, ...@@ -117,7 +117,7 @@ static void settlbcam(int index, unsigned long virt, phys_addr_t phys,
TLBCAM[index].MAS2 |= (flags & _PAGE_ENDIAN) ? MAS2_E : 0; TLBCAM[index].MAS2 |= (flags & _PAGE_ENDIAN) ? MAS2_E : 0;
TLBCAM[index].MAS3 = (phys & MAS3_RPN) | MAS3_SR; TLBCAM[index].MAS3 = (phys & MAS3_RPN) | MAS3_SR;
TLBCAM[index].MAS3 |= (flags & _PAGE_RW) ? MAS3_SW : 0; TLBCAM[index].MAS3 |= (flags & _PAGE_WRITE) ? MAS3_SW : 0;
if (mmu_has_feature(MMU_FTR_BIG_PHYS)) if (mmu_has_feature(MMU_FTR_BIG_PHYS))
TLBCAM[index].MAS7 = (u64)phys >> 32; TLBCAM[index].MAS7 = (u64)phys >> 32;
...@@ -125,7 +125,7 @@ static void settlbcam(int index, unsigned long virt, phys_addr_t phys, ...@@ -125,7 +125,7 @@ static void settlbcam(int index, unsigned long virt, phys_addr_t phys,
if (!is_kernel_addr(virt)) { if (!is_kernel_addr(virt)) {
TLBCAM[index].MAS3 |= MAS3_UR; TLBCAM[index].MAS3 |= MAS3_UR;
TLBCAM[index].MAS3 |= (flags & _PAGE_EXEC) ? MAS3_UX : 0; TLBCAM[index].MAS3 |= (flags & _PAGE_EXEC) ? MAS3_UX : 0;
TLBCAM[index].MAS3 |= (flags & _PAGE_RW) ? MAS3_UW : 0; TLBCAM[index].MAS3 |= (flags & _PAGE_WRITE) ? MAS3_UW : 0;
} else { } else {
TLBCAM[index].MAS3 |= (flags & _PAGE_EXEC) ? MAS3_SX : 0; TLBCAM[index].MAS3 |= (flags & _PAGE_EXEC) ? MAS3_SX : 0;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment