Commit 93820bfe authored by Christophe Leroy, committed by Michael Ellerman

powerpc/44x: Introduce _PAGE_READ and remove _PAGE_USER

The 44x MMU has 6 page protection bits:
- R, W, X for supervisor
- R, W, X for user

This means it can support X without R (execute-only).

To do that, a _PAGE_READ flag is needed, but there is no bit available
for it in the PTE. On the other hand, the only real use of _PAGE_USER
is to implement PAGE_NONE by clearing it.

As PAGE_NONE can also be implemented by clearing _PAGE_READ, remove
_PAGE_USER and add _PAGE_READ. In order to insert the bits in one go
during a TLB miss, move _PAGE_ACCESSED and put _PAGE_READ just after
_PAGE_DIRTY, so that _PAGE_DIRTY is copied into SW and _PAGE_READ
into SR at once.
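
Purely as an illustration (not part of the patch): a minimal C sketch of the bit arithmetic this placement enables. Rotating the PTE left by 29 bits lines _PAGE_DIRTY up with the SW bit and _PAGE_READ up with the SR bit of TLB word 2, which is why the miss handlers below can gate both permissions with a single rlwimi.

/* Minimal sketch, assuming the PTE bit values introduced by this patch
 * (_PAGE_READ = 0x08, _PAGE_DIRTY = 0x10) and the 44x attribute-word
 * layout (SW = 0x2, SR = 0x1). */
#include <stdint.h>
#include <stdio.h>

#define _PAGE_READ  0x00000008
#define _PAGE_DIRTY 0x00000010

static uint32_t rotl32(uint32_t x, unsigned int n)
{
        return (x << n) | (x >> (32 - n));
}

int main(void)
{
        printf("DIRTY 0x%02x -> 0x%x (SW)\n", _PAGE_DIRTY, rotl32(_PAGE_DIRTY, 29));
        printf("READ  0x%02x -> 0x%x (SR)\n", _PAGE_READ, rotl32(_PAGE_READ, 29));
        return 0;
}

This matches the "DIRTY,READ -> SW,SR position" comment on the updated rlwimi instructions in the diff below.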

With that change, 44x now also honors execute-only protection.
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/043e17987b260b99b45094138c6cb2e89e63d499.1695659959.git.christophe.leroy@csgroup.eu
parent 48cf93bb
@@ -63,28 +63,21 @@
  */
 #define _PAGE_PRESENT 0x00000001 /* S: PTE valid */
-#define _PAGE_RW 0x00000002 /* S: Write permission */
+#define _PAGE_WRITE 0x00000002 /* S: Write permission */
 #define _PAGE_EXEC 0x00000004 /* H: Execute permission */
-#define _PAGE_ACCESSED 0x00000008 /* S: Page referenced */
+#define _PAGE_READ 0x00000008 /* S: Read permission */
 #define _PAGE_DIRTY 0x00000010 /* S: Page dirty */
 #define _PAGE_SPECIAL 0x00000020 /* S: Special page */
-#define _PAGE_USER 0x00000040 /* S: User page */
+#define _PAGE_ACCESSED 0x00000040 /* S: Page referenced */
 #define _PAGE_ENDIAN 0x00000080 /* H: E bit */
 #define _PAGE_GUARDED 0x00000100 /* H: G bit */
 #define _PAGE_COHERENT 0x00000200 /* H: M bit */
 #define _PAGE_NO_CACHE 0x00000400 /* H: I bit */
 #define _PAGE_WRITETHRU 0x00000800 /* H: W bit */

-#define _PAGE_WRITE _PAGE_RW
-
 /* No page size encoding in the linux PTE */
 #define _PAGE_PSIZE 0

-#define _PAGE_KERNEL_RO 0
-#define _PAGE_KERNEL_ROX _PAGE_EXEC
-#define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW)
-#define _PAGE_KERNEL_RWX (_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC)
-
 /* TODO: Add large page lowmem mapping support */
 #define _PMD_PRESENT 0
 #define _PMD_PRESENT_MASK (PAGE_MASK)
@@ -107,14 +100,7 @@
 #define _PAGE_BASE (_PAGE_BASE_NC)
 #endif

-/* Permission masks used to generate the __P and __S table */
-#define PAGE_NONE __pgprot(_PAGE_BASE)
-#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
-#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
-#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
-#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+#include <asm/pgtable-masks.h>

 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_NOHASH_32_PTE_44x_H */
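
The removed per-platform permission table is replaced by the shared <asm/pgtable-masks.h>. Purely as a hedged illustration of what the commit message describes (a sketch, not the literal contents of that header), protections can now be expressed with _PAGE_READ instead of _PAGE_USER, which is what makes an execute-only mapping (X set, R clear) representable:

/* Hypothetical sketch only -- the real masks come from the generic
 * <asm/pgtable-masks.h>.  "No access" and "execute only" now fall out
 * of clearing _PAGE_READ rather than _PAGE_USER. */
#define PAGE_NONE_SKETCH       __pgprot(_PAGE_BASE)                            /* no R, W or X */
#define PAGE_READONLY_SKETCH   __pgprot(_PAGE_BASE | _PAGE_READ)
#define PAGE_READONLY_X_SKETCH __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
#define PAGE_EXECONLY_SKETCH   __pgprot(_PAGE_BASE | _PAGE_EXEC)               /* X without R */
#define PAGE_SHARED_SKETCH     __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE)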
@@ -314,8 +314,8 @@ interrupt_base:
  * kernel page tables.
  */
 lis r11, PAGE_OFFSET@h
-cmplw r10, r11
-blt+ 3f
+cmplw cr7, r10, r11
+blt+ cr7, 3f
 lis r11, swapper_pg_dir@h
 ori r11, r11, swapper_pg_dir@l
@@ -355,7 +355,7 @@ interrupt_base:
  * place or can we save a couple of instructions here ?
  */
 mfspr r12,SPRN_ESR
-li r13,_PAGE_PRESENT|_PAGE_ACCESSED
+li r13,_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_READ
 rlwimi r13,r12,10,30,30

 /* Load the PTE */
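
For context, a hedged C model (illustration only, not code from the patch) of the "required PTE bits" word built in r13 above: the baseline is now _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_READ, and the rlwimi copies the ESR "store operation" bit into the _PAGE_WRITE position, so a store fault additionally requires write permission before the PTE is accepted.

/* Illustration only.  ESR_ST is the Book-E "fault was a store" bit of
 * the Exception Syndrome Register; rotating the ESR left by 10 bits
 * lands it on _PAGE_WRITE (0x2), which is what rlwimi r13,r12,10,30,30
 * achieves in a single instruction. */
#define ESR_ST          0x00800000
#define _PAGE_PRESENT   0x00000001
#define _PAGE_WRITE     0x00000002
#define _PAGE_READ      0x00000008
#define _PAGE_ACCESSED  0x00000040

static unsigned int required_pte_bits(unsigned int esr)
{
        unsigned int req = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_READ;

        if (esr & ESR_ST)               /* the faulting access was a store */
                req |= _PAGE_WRITE;
        return req;
}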
@@ -428,8 +428,8 @@ interrupt_base:
  * kernel page tables.
  */
 lis r11, PAGE_OFFSET@h
-cmplw r10, r11
-blt+ 3f
+cmplw cr7, r10, r11
+blt+ cr7, 3f
 lis r11, swapper_pg_dir@h
 ori r11, r11, swapper_pg_dir@l
@@ -515,6 +515,7 @@ interrupt_base:
  * r11 - PTE high word value
  * r12 - PTE low word value
  * r13 - TLB index
+ * cr7 - Result of comparison with PAGE_OFFSET
  * MMUCR - loaded with proper value when we get here
  * Upon exit, we reload everything and RFI.
  */
@@ -533,11 +534,10 @@ finish_tlb_load_44x:
 tlbwe r10,r13,PPC44x_TLB_PAGEID /* Write PAGEID */

 /* And WS 2 */
-li r10,0xf85 /* Mask to apply from PTE */
-rlwimi r10,r12,29,30,30 /* DIRTY -> SW position */
+li r10,0xf84 /* Mask to apply from PTE */
+rlwimi r10,r12,29,30,31 /* DIRTY,READ -> SW,SR position */
 and r11,r12,r10 /* Mask PTE bits to keep */
-andi. r10,r12,_PAGE_USER /* User page ? */
-beq 1f /* nope, leave U bits empty */
+bge cr7,1f /* User page ? no, leave U bits empty */
 rlwimi r11,r11,3,26,28 /* yes, copy S bits to U */
 rlwinm r11,r11,0,~PPC44x_TLB_SX /* Clear SX if User page */
 1: tlbwe r11,r13,PPC44x_TLB_ATTRIB /* Write ATTRIB */
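
As an aside (a sketch, not code from the patch), a C model of how this block now builds the attribute word: the base mask drops from 0xf85 to 0xf84 so SR is no longer granted unconditionally, the widened rlwimi gates SW on _PAGE_DIRTY and SR on _PAGE_READ, and the removed _PAGE_USER test is replaced by "bge cr7,1f", which reuses the earlier comparison of the faulting address with PAGE_OFFSET (kept in cr7, see the register comment above) to decide whether the supervisor bits get copied into the user bits.

/* Minimal sketch, assuming the PTE bit values from this patch and the
 * 44x attribute-word layout (UX/UW/UR = 0x20/0x10/0x08,
 * SX/SW/SR = 0x04/0x02/0x01).  is_kernel_addr stands for the cr7 result
 * of the earlier comparison with PAGE_OFFSET. */
#define _PAGE_PRESENT 0x00000001
#define _PAGE_WRITE   0x00000002
#define _PAGE_READ    0x00000008
#define _PAGE_DIRTY   0x00000010

static unsigned int tlb_attrib_word(unsigned int pte, int is_kernel_addr)
{
        /* W, I, M, G, E storage attributes plus SX, which sits in the
         * same bit position as _PAGE_EXEC. */
        unsigned int mask = 0xf84;
        unsigned int w2;

        if (pte & _PAGE_DIRTY)
                mask |= 0x2;            /* let SW come from _PAGE_WRITE */
        if (pte & _PAGE_READ)
                mask |= 0x1;            /* let SR come from _PAGE_PRESENT */

        w2 = pte & mask;

        if (!is_kernel_addr) {          /* user page */
                w2 |= (w2 & 0x7) << 3;  /* copy SX/SW/SR into UX/UW/UR */
                w2 &= ~0x4;             /* and clear SX for user pages */
        }
        return w2;
}

In this model a writable page only gets SW once it is dirty, and a page without _PAGE_READ (execute-only) ends up with SR/UR clear while keeping UX, which is how 44x now honors execute-only protection.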
@@ -568,8 +568,8 @@ finish_tlb_load_44x:
  * kernel page tables.
  */
 lis r11,PAGE_OFFSET@h
-cmplw cr0,r10,r11
-blt+ 3f
+cmplw cr7,r10,r11
+blt+ cr7,3f
 lis r11,swapper_pg_dir@h
 ori r11,r11, swapper_pg_dir@l
 li r12,0 /* MMUCR = 0 */
@@ -599,7 +599,7 @@ finish_tlb_load_44x:
  * place or can we save a couple of instructions here ?
  */
 mfspr r12,SPRN_ESR
-li r13,_PAGE_PRESENT|_PAGE_ACCESSED
+li r13,_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_READ
 rlwimi r13,r12,10,30,30

 /* Load the PTE */
@@ -669,8 +669,8 @@ finish_tlb_load_44x:
  * kernel page tables.
  */
 lis r11,PAGE_OFFSET@h
-cmplw cr0,r10,r11
-blt+ 3f
+cmplw cr7,r10,r11
+blt+ cr7,3f
 lis r11,swapper_pg_dir@h
 ori r11,r11, swapper_pg_dir@l
 li r12,0 /* MMUCR = 0 */
@@ -744,6 +744,7 @@ finish_tlb_load_44x:
  * r11 - PTE high word value
  * r12 - PTE low word value
  * r13 - free to use
+ * cr7 - Result of comparison with PAGE_OFFSET
  * MMUCR - loaded with proper value when we get here
  * Upon exit, we reload everything and RFI.
  */
@@ -753,11 +754,10 @@ finish_tlb_load_47x:
 tlbwe r11,r13,1

 /* And make up word 2 */
-li r10,0xf85 /* Mask to apply from PTE */
-rlwimi r10,r12,29,30,30 /* DIRTY -> SW position */
+li r10,0xf84 /* Mask to apply from PTE */
+rlwimi r10,r12,29,30,31 /* DIRTY,READ -> SW,SR position */
 and r11,r12,r10 /* Mask PTE bits to keep */
-andi. r10,r12,_PAGE_USER /* User page ? */
-beq 1f /* nope, leave U bits empty */
+bge cr7,1f /* User page ? no, leave U bits empty */
 rlwimi r11,r11,3,26,28 /* yes, copy S bits to U */
 rlwinm r11,r11,0,~PPC47x_TLB2_SX /* Clear SX if User page */
 1: tlbwe r11,r13,2
...