Commit 27a921e7 authored by Will Deacon

arm64: mm: Fix and re-enable ARM64_SW_TTBR0_PAN

With the ASID now installed in TTBR1, we can re-enable ARM64_SW_TTBR0_PAN
by ensuring that we switch to a reserved ASID of zero when disabling
user access and restore the active user ASID on the uaccess enable path.
Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Tested-by: Laura Abbott <labbott@redhat.com>
Tested-by: Shanker Donthineni <shankerd@codeaurora.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
parent 158d4958
...@@ -910,7 +910,6 @@ endif ...@@ -910,7 +910,6 @@ endif
config ARM64_SW_TTBR0_PAN config ARM64_SW_TTBR0_PAN
bool "Emulate Privileged Access Never using TTBR0_EL1 switching" bool "Emulate Privileged Access Never using TTBR0_EL1 switching"
depends on BROKEN # Temporary while switch_mm is reworked
help help
Enabling this option prevents the kernel from accessing Enabling this option prevents the kernel from accessing
user-space memory directly by pointing TTBR0_EL1 to a reserved user-space memory directly by pointing TTBR0_EL1 to a reserved
......
...@@ -16,11 +16,20 @@ ...@@ -16,11 +16,20 @@
add \tmp1, \tmp1, #SWAPPER_DIR_SIZE // reserved_ttbr0 at the end of swapper_pg_dir add \tmp1, \tmp1, #SWAPPER_DIR_SIZE // reserved_ttbr0 at the end of swapper_pg_dir
msr ttbr0_el1, \tmp1 // set reserved TTBR0_EL1 msr ttbr0_el1, \tmp1 // set reserved TTBR0_EL1
isb isb
sub \tmp1, \tmp1, #SWAPPER_DIR_SIZE
bic \tmp1, \tmp1, #(0xffff << 48)
msr ttbr1_el1, \tmp1 // set reserved ASID
isb
.endm .endm
.macro __uaccess_ttbr0_enable, tmp1 .macro __uaccess_ttbr0_enable, tmp1, tmp2
get_thread_info \tmp1 get_thread_info \tmp1
ldr \tmp1, [\tmp1, #TSK_TI_TTBR0] // load saved TTBR0_EL1 ldr \tmp1, [\tmp1, #TSK_TI_TTBR0] // load saved TTBR0_EL1
mrs \tmp2, ttbr1_el1
extr \tmp2, \tmp2, \tmp1, #48
ror \tmp2, \tmp2, #16
msr ttbr1_el1, \tmp2 // set the active ASID
isb
msr ttbr0_el1, \tmp1 // set the non-PAN TTBR0_EL1 msr ttbr0_el1, \tmp1 // set the non-PAN TTBR0_EL1
isb isb
.endm .endm
...@@ -31,18 +40,18 @@ alternative_if_not ARM64_HAS_PAN ...@@ -31,18 +40,18 @@ alternative_if_not ARM64_HAS_PAN
alternative_else_nop_endif alternative_else_nop_endif
.endm .endm
.macro uaccess_ttbr0_enable, tmp1, tmp2 .macro uaccess_ttbr0_enable, tmp1, tmp2, tmp3
alternative_if_not ARM64_HAS_PAN alternative_if_not ARM64_HAS_PAN
save_and_disable_irq \tmp2 // avoid preemption save_and_disable_irq \tmp3 // avoid preemption
__uaccess_ttbr0_enable \tmp1 __uaccess_ttbr0_enable \tmp1, \tmp2
restore_irq \tmp2 restore_irq \tmp3
alternative_else_nop_endif alternative_else_nop_endif
.endm .endm
#else #else
.macro uaccess_ttbr0_disable, tmp1 .macro uaccess_ttbr0_disable, tmp1
.endm .endm
.macro uaccess_ttbr0_enable, tmp1, tmp2 .macro uaccess_ttbr0_enable, tmp1, tmp2, tmp3
.endm .endm
#endif #endif
...@@ -56,8 +65,8 @@ alternative_if ARM64_ALT_PAN_NOT_UAO ...@@ -56,8 +65,8 @@ alternative_if ARM64_ALT_PAN_NOT_UAO
alternative_else_nop_endif alternative_else_nop_endif
.endm .endm
.macro uaccess_enable_not_uao, tmp1, tmp2 .macro uaccess_enable_not_uao, tmp1, tmp2, tmp3
uaccess_ttbr0_enable \tmp1, \tmp2 uaccess_ttbr0_enable \tmp1, \tmp2, \tmp3
alternative_if ARM64_ALT_PAN_NOT_UAO alternative_if ARM64_ALT_PAN_NOT_UAO
SET_PSTATE_PAN(0) SET_PSTATE_PAN(0)
alternative_else_nop_endif alternative_else_nop_endif
......
...@@ -107,15 +107,19 @@ static inline void __uaccess_ttbr0_disable(void) ...@@ -107,15 +107,19 @@ static inline void __uaccess_ttbr0_disable(void)
{ {
unsigned long ttbr; unsigned long ttbr;
ttbr = read_sysreg(ttbr1_el1);
/* reserved_ttbr0 placed at the end of swapper_pg_dir */ /* reserved_ttbr0 placed at the end of swapper_pg_dir */
ttbr = read_sysreg(ttbr1_el1) + SWAPPER_DIR_SIZE; write_sysreg(ttbr + SWAPPER_DIR_SIZE, ttbr0_el1);
write_sysreg(ttbr, ttbr0_el1); isb();
/* Set reserved ASID */
ttbr &= ~(0xffffUL << 48);
write_sysreg(ttbr, ttbr1_el1);
isb(); isb();
} }
static inline void __uaccess_ttbr0_enable(void) static inline void __uaccess_ttbr0_enable(void)
{ {
unsigned long flags; unsigned long flags, ttbr0, ttbr1;
/* /*
* Disable interrupts to avoid preemption between reading the 'ttbr0' * Disable interrupts to avoid preemption between reading the 'ttbr0'
...@@ -123,7 +127,16 @@ static inline void __uaccess_ttbr0_enable(void) ...@@ -123,7 +127,16 @@ static inline void __uaccess_ttbr0_enable(void)
* roll-over and an update of 'ttbr0'. * roll-over and an update of 'ttbr0'.
*/ */
local_irq_save(flags); local_irq_save(flags);
write_sysreg(current_thread_info()->ttbr0, ttbr0_el1); ttbr0 = current_thread_info()->ttbr0;
/* Restore active ASID */
ttbr1 = read_sysreg(ttbr1_el1);
ttbr1 |= ttbr0 & (0xffffUL << 48);
write_sysreg(ttbr1, ttbr1_el1);
isb();
/* Restore user page table */
write_sysreg(ttbr0, ttbr0_el1);
isb(); isb();
local_irq_restore(flags); local_irq_restore(flags);
} }
......
...@@ -184,7 +184,7 @@ alternative_if ARM64_HAS_PAN ...@@ -184,7 +184,7 @@ alternative_if ARM64_HAS_PAN
alternative_else_nop_endif alternative_else_nop_endif
.if \el != 0 .if \el != 0
mrs x21, ttbr0_el1 mrs x21, ttbr1_el1
tst x21, #0xffff << 48 // Check for the reserved ASID tst x21, #0xffff << 48 // Check for the reserved ASID
orr x23, x23, #PSR_PAN_BIT // Set the emulated PAN in the saved SPSR orr x23, x23, #PSR_PAN_BIT // Set the emulated PAN in the saved SPSR
b.eq 1f // TTBR0 access already disabled b.eq 1f // TTBR0 access already disabled
...@@ -248,7 +248,7 @@ alternative_else_nop_endif ...@@ -248,7 +248,7 @@ alternative_else_nop_endif
tbnz x22, #22, 1f // Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set tbnz x22, #22, 1f // Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
.endif .endif
__uaccess_ttbr0_enable x0 __uaccess_ttbr0_enable x0, x1
.if \el == 0 .if \el == 0
/* /*
......
...@@ -30,7 +30,7 @@ ...@@ -30,7 +30,7 @@
* Alignment fixed up by hardware. * Alignment fixed up by hardware.
*/ */
ENTRY(__clear_user) ENTRY(__clear_user)
uaccess_enable_not_uao x2, x3 uaccess_enable_not_uao x2, x3, x4
mov x2, x1 // save the size for fixup return mov x2, x1 // save the size for fixup return
subs x1, x1, #8 subs x1, x1, #8
b.mi 2f b.mi 2f
......
...@@ -64,7 +64,7 @@ ...@@ -64,7 +64,7 @@
end .req x5 end .req x5
ENTRY(__arch_copy_from_user) ENTRY(__arch_copy_from_user)
uaccess_enable_not_uao x3, x4 uaccess_enable_not_uao x3, x4, x5
add end, x0, x2 add end, x0, x2
#include "copy_template.S" #include "copy_template.S"
uaccess_disable_not_uao x3 uaccess_disable_not_uao x3
......
...@@ -65,7 +65,7 @@ ...@@ -65,7 +65,7 @@
end .req x5 end .req x5
ENTRY(raw_copy_in_user) ENTRY(raw_copy_in_user)
uaccess_enable_not_uao x3, x4 uaccess_enable_not_uao x3, x4, x5
add end, x0, x2 add end, x0, x2
#include "copy_template.S" #include "copy_template.S"
uaccess_disable_not_uao x3 uaccess_disable_not_uao x3
......
...@@ -63,7 +63,7 @@ ...@@ -63,7 +63,7 @@
end .req x5 end .req x5
ENTRY(__arch_copy_to_user) ENTRY(__arch_copy_to_user)
uaccess_enable_not_uao x3, x4 uaccess_enable_not_uao x3, x4, x5
add end, x0, x2 add end, x0, x2
#include "copy_template.S" #include "copy_template.S"
uaccess_disable_not_uao x3 uaccess_disable_not_uao x3
......
...@@ -49,7 +49,7 @@ ENTRY(flush_icache_range) ...@@ -49,7 +49,7 @@ ENTRY(flush_icache_range)
* - end - virtual end address of region * - end - virtual end address of region
*/ */
ENTRY(__flush_cache_user_range) ENTRY(__flush_cache_user_range)
uaccess_ttbr0_enable x2, x3 uaccess_ttbr0_enable x2, x3, x4
dcache_line_size x2, x3 dcache_line_size x2, x3
sub x3, x2, #1 sub x3, x2, #1
bic x4, x0, x3 bic x4, x0, x3
......
...@@ -101,7 +101,7 @@ ENTRY(privcmd_call) ...@@ -101,7 +101,7 @@ ENTRY(privcmd_call)
* need the explicit uaccess_enable/disable if the TTBR0 PAN emulation * need the explicit uaccess_enable/disable if the TTBR0 PAN emulation
* is enabled (it implies that hardware UAO and PAN disabled). * is enabled (it implies that hardware UAO and PAN disabled).
*/ */
uaccess_ttbr0_enable x6, x7 uaccess_ttbr0_enable x6, x7, x8
hvc XEN_IMM hvc XEN_IMM
/* /*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment