Commit ed815bd3 authored by Christophe Leroy, committed by Michael Ellerman

powerpc/40x: Introduce _PAGE_READ and remove _PAGE_USER

_PAGE_USER is used to select the zone. Today zone 0 is kernel
and zone 1 is user.

To implement _PAGE_NONE, _PAGE_USER is cleared. That removes user access,
but the kernel still has access to the page, so it is possible for a user
application to write to that page by using a kernel function as a
trampoline.

What is really wanted is to have user rights on pages below TASK_SIZE
and no user rights on pages above TASK_SIZE. Use zones for that.
There are 16 zones, so let's use the 4 upper address bits to select the
zone and declare zone rights based on TASK_SIZE.
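
As an illustration (not part of the patch), the ZPR value that
MMU_init_hw() computes below can be reproduced with this small C
sketch; the TASK_SIZE of 0xc0000000 is only an assumed example, the
real value is configuration dependent:

  #include <stdio.h>

  #define TASK_SIZE 0xc0000000UL /* assumed example value */

  int main(void)
  {
          unsigned long zpr = 0;
          int i;

          /* Each 256MB zone below TASK_SIZE gets the 2-bit value 01
           * (user access follows the PTE); zones above TASK_SIZE stay
           * 00 (kernel-only access). Zone 0 sits in the 2 upper bits
           * of ZPR. */
          for (i = 0; i < TASK_SIZE >> 28; i++)
                  zpr |= 1UL << (30 - i * 2);

          printf("ZPR = 0x%08lx\n", zpr); /* 0x55555500 here */
          return 0;
  }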

Then drop _PAGE_USER and reuse its bit as _PAGE_READ, which will be
checked in the Data TLB miss handler. That properly handles PAGE_NONE
for both kernel and user.
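
As a rough C sketch of what the new check amounts to (the real check is
done in assembly with andc. in the Data TLB miss handler; the helper
name below is made up for illustration), PAGE_NONE now fails for user
and kernel alike because it carries no _PAGE_READ:

  #define _PAGE_PRESENT  0x002
  #define _PAGE_READ     0x010
  #define _PAGE_ACCESSED 0x400

  /* Hypothetical helper: the miss handler branches to the fault path
   * unless all of these bits are set in the Linux PTE. */
  static inline int dtlb_access_ok(unsigned long pte)
  {
          unsigned long required = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_READ;

          return (required & ~pte) == 0;
  }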

In addition, this partially implements execute-only rights. The
implementation is not complete, because once a TLB entry has been loaded
via the Instruction TLB miss handler the page can also be read. But at
least it cannot be read unless it has been executed first.
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/2a13e3ba8a5dec43143cc1f9a91ec71ea1529f3c.1695659959.git.christophe.leroy@csgroup.eu
parent 93820bfe
arch/powerpc/include/asm/nohash/32/pte-40x.h
@@ -42,26 +42,19 @@
 #define _PAGE_PRESENT 0x002 /* software: PTE contains a translation */
 #define _PAGE_NO_CACHE 0x004 /* I: caching is inhibited */
 #define _PAGE_WRITETHRU 0x008 /* W: caching is write-through */
-#define _PAGE_USER 0x010 /* matches one of the zone permission bits */
+#define _PAGE_READ 0x010 /* software: read permission */
 #define _PAGE_SPECIAL 0x020 /* software: Special page */
 #define _PAGE_DIRTY 0x080 /* software: dirty page */
-#define _PAGE_RW 0x100 /* hardware: WR, anded with dirty in exception */
+#define _PAGE_WRITE 0x100 /* hardware: WR, anded with dirty in exception */
 #define _PAGE_EXEC 0x200 /* hardware: EX permission */
 #define _PAGE_ACCESSED 0x400 /* software: R: page referenced */
-#define _PAGE_WRITE _PAGE_RW
 /* No page size encoding in the linux PTE */
 #define _PAGE_PSIZE 0
 /* cache related flags non existing on 40x */
 #define _PAGE_COHERENT 0
-#define _PAGE_KERNEL_RO 0
-#define _PAGE_KERNEL_ROX _PAGE_EXEC
-#define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW)
-#define _PAGE_KERNEL_RWX (_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC)
 #define _PMD_PRESENT 0x400 /* PMD points to page of PTEs */
 #define _PMD_PRESENT_MASK _PMD_PRESENT
 #define _PMD_BAD 0x802
@@ -74,14 +67,7 @@
 #define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED)
 #define _PAGE_BASE (_PAGE_BASE_NC)
-/* Permission masks used to generate the __P and __S table */
-#define PAGE_NONE __pgprot(_PAGE_BASE)
-#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
-#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
-#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
-#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+#include <asm/pgtable-masks.h>
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_NOHASH_32_PTE_40x_H */
arch/powerpc/kernel/head_40x.S
@@ -312,7 +312,7 @@ _ASM_NOKPROBE_SYMBOL(\name\()_virt)
 rlwimi r11, r10, 22, 20, 29 /* Compute PTE address */
 lwz r11, 0(r11) /* Get Linux PTE */
-li r9, _PAGE_PRESENT | _PAGE_ACCESSED
+li r9, _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_READ
 andc. r9, r9, r11 /* Check permission */
 bne 5f
@@ -561,10 +561,11 @@ finish_tlb_load:
 /*
  * Clear out the software-only bits in the PTE to generate the
  * TLB_DATA value. These are the bottom 2 bits of the RPM, the
- * top 3 bits of the zone field, and M.
+ * 4 bits of the zone field, and M.
  */
-li r9, 0x0ce2
+li r9, 0x0cf2
 andc r11, r11, r9
+rlwimi r11, r10, 8, 24, 27 /* Copy 4 upper address bit into zone */
 /* load the next available TLB index. */
 lwz r9, tlb_4xx_index@l(0)
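For readability, here is a hedged C equivalent of the rlwimi added
above, assuming r10 holds the faulting address and r11 the TLB_DATA
word being built, with the ZSEL field taken to be the 0xf0 bits (which
are among those cleared by the new 0x0cf2 mask):

  /* Sketch of "rlwimi r11, r10, 8, 24, 27": rotate the address left
   * by 8 and insert bits 24-27 (IBM numbering), i.e. copy the 4 upper
   * address bits into the zone select field of TLB_DATA. */
  static unsigned long set_zsel(unsigned long tlb_data, unsigned long addr)
  {
          return (tlb_data & ~0xf0UL) | ((addr >> 24) & 0xf0UL);
  }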
arch/powerpc/mm/nohash/40x.c
@@ -48,20 +48,25 @@
 */
 void __init MMU_init_hw(void)
 {
+int i;
+unsigned long zpr;
 /*
  * The Zone Protection Register (ZPR) defines how protection will
- * be applied to every page which is a member of a given zone. At
- * present, we utilize only two of the 4xx's zones.
+ * be applied to every page which is a member of a given zone.
  * The zone index bits (of ZSEL) in the PTE are used for software
- * indicators, except the LSB. For user access, zone 1 is used,
- * for kernel access, zone 0 is used. We set all but zone 1
- * to zero, allowing only kernel access as indicated in the PTE.
- * For zone 1, we set a 01 binary (a value of 10 will not work)
+ * indicators. We use the 4 upper bits of virtual address to select
+ * the zone. We set all zones above TASK_SIZE to zero, allowing
+ * only kernel access as indicated in the PTE. For zones below
+ * TASK_SIZE, we set a 01 binary (a value of 10 will not work)
  * to allow user access as indicated in the PTE. This also allows
  * kernel access as indicated in the PTE.
  */
-mtspr(SPRN_ZPR, 0x10000000);
+for (i = 0, zpr = 0; i < TASK_SIZE >> 28; i++)
+	zpr |= 1 << (30 - i * 2);
+mtspr(SPRN_ZPR, zpr);
 flush_instruction_cache();