Commit 44ca0e00 authored by Catalin Marinas

Merge branch 'for-next/kernel-ptrauth' into for-next/core

* for-next/kernel-ptrauth:
  : Return address signing - in-kernel support
  arm64: Kconfig: verify binutils support for ARM64_PTR_AUTH
  lkdtm: arm64: test kernel pointer authentication
  arm64: compile the kernel with ptrauth return address signing
  kconfig: Add support for 'as-option'
  arm64: suspend: restore the kernel ptrauth keys
  arm64: __show_regs: strip PAC from lr in printk
  arm64: unwind: strip PAC from kernel addresses
  arm64: mask PAC bits of __builtin_return_address
  arm64: initialize ptrauth keys for kernel booting task
  arm64: initialize and switch ptrauth kernel keys
  arm64: enable ptrauth earlier
  arm64: cpufeature: handle conflicts based on capability
  arm64: cpufeature: Move cpu capability helpers inside C file
  arm64: ptrauth: Add bootup/runtime flags for __cpu_setup
  arm64: install user ptrauth keys at kernel exit time
  arm64: rename ptrauth key structures to be user-specific
  arm64: cpufeature: add pointer auth meta-capabilities
  arm64: cpufeature: Fix meta-capability cpufeature check
parents 806dc825 3b446c7d
@@ -118,6 +118,7 @@ config ARM64
 	select HAVE_ALIGNED_STRUCT_PAGE if SLUB
 	select HAVE_ARCH_AUDITSYSCALL
 	select HAVE_ARCH_BITREVERSE
+	select HAVE_ARCH_COMPILER_H
 	select HAVE_ARCH_HUGE_VMAP
 	select HAVE_ARCH_JUMP_LABEL
 	select HAVE_ARCH_JUMP_LABEL_RELATIVE
@@ -1501,6 +1502,9 @@ config ARM64_PTR_AUTH
 	bool "Enable support for pointer authentication"
 	default y
 	depends on !KVM || ARM64_VHE
+	depends on (CC_HAS_SIGN_RETURN_ADDRESS || CC_HAS_BRANCH_PROT_PAC_RET) && AS_HAS_PAC
+	depends on CC_IS_GCC || (CC_IS_CLANG && AS_HAS_CFI_NEGATE_RA_STATE)
+	depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS)
 	help
 	  Pointer authentication (part of the ARMv8.3 Extensions) provides
 	  instructions for signing and authenticating pointers against secret
@@ -1508,16 +1512,45 @@ config ARM64_PTR_AUTH
 	  and other attacks.

 	  This option enables these instructions at EL0 (i.e. for userspace).
 	  Choosing this option will cause the kernel to initialise secret keys
 	  for each process at exec() time, with these keys being
 	  context-switched along with the process.

+	  If the compiler supports the -mbranch-protection or
+	  -msign-return-address flag (e.g. GCC 7 or later), then this option
+	  will also cause the kernel itself to be compiled with return address
+	  protection. In this case, and if the target hardware is known to
+	  support pointer authentication, then CONFIG_STACKPROTECTOR can be
+	  disabled with minimal loss of protection.
+
 	  The feature is detected at runtime. If the feature is not present in
 	  hardware it will not be advertised to userspace/KVM guests, nor will
 	  it be enabled. Note that KVM guests also require VHE mode, and hence
 	  the CONFIG_ARM64_VHE=y option, to use this feature.

+	  If the feature is present on the boot CPU but not on a late CPU, then
+	  the late CPU will be parked. Conversely, if the boot CPU does not have
+	  address auth and a late CPU does, the late CPU will still boot, but
+	  with the feature disabled. On such a system, this option should not
+	  be selected.
+
+	  This feature works with the FUNCTION_GRAPH_TRACER option only if
+	  DYNAMIC_FTRACE_WITH_REGS is enabled.
+
+config CC_HAS_BRANCH_PROT_PAC_RET
+	# GCC 9 or later, clang 8 or later
+	def_bool $(cc-option,-mbranch-protection=pac-ret+leaf)
+
+config CC_HAS_SIGN_RETURN_ADDRESS
+	# GCC 7, 8
+	def_bool $(cc-option,-msign-return-address=all)
+
+config AS_HAS_PAC
+	def_bool $(as-option,-Wa$(comma)-march=armv8.3-a)
+
+config AS_HAS_CFI_NEGATE_RA_STATE
+	def_bool $(as-instr,.cfi_startproc\n.cfi_negate_ra_state\n.cfi_endproc\n)
+
 endmenu

 menu "ARMv8.4 architectural features"
......
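To illustrate what these compiler options actually do to generated code (a sketch only, not part of the patch; the function names are made up and exact codegen varies by toolchain): with -mbranch-protection=pac-ret+leaf the compiler signs the link register on function entry and authenticates it on exit, so a corrupted saved LR faults on return instead of being followed.

/*
 * Sketch only: compile with
 *   gcc -O2 -mbranch-protection=pac-ret+leaf -S sign.c
 * and the prologue/epilogue of a non-leaf function comes out roughly as:
 *
 *   callee:
 *     paciasp                    // sign LR (x30) with APIAKey, SP as modifier
 *     stp  x29, x30, [sp, #-16]!
 *     ...
 *     ldp  x29, x30, [sp], #16
 *     autiasp                    // authenticate LR; on mismatch the pointer
 *     ret                        //   becomes invalid and the return faults
 */
int external_work(int v);	/* hypothetical callee, forces a non-leaf frame */

int callee(int v)
{
	return external_work(v) + 1;
}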
@@ -65,6 +65,17 @@ stack_protector_prepare: prepare0
 					include/generated/asm-offsets.h))
 endif

+ifeq ($(CONFIG_ARM64_PTR_AUTH),y)
+branch-prot-flags-$(CONFIG_CC_HAS_SIGN_RETURN_ADDRESS) := -msign-return-address=all
+branch-prot-flags-$(CONFIG_CC_HAS_BRANCH_PROT_PAC_RET) := -mbranch-protection=pac-ret+leaf
+# -march=armv8.3-a enables the non-NOP PAC instructions. To keep the compiler
+# from generating them (which would break the single-image contract), we pass
+# the option only to the assembler. It is needed only for non-integrated
+# assemblers.
+branch-prot-flags-$(CONFIG_AS_HAS_PAC) += -Wa,-march=armv8.3-a
+KBUILD_CFLAGS += $(branch-prot-flags-y)
+endif
+
 ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
 KBUILD_CPPFLAGS	+= -mbig-endian
 CHECKFLAGS	+= -D__AARCH64EB__
......
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ASM_POINTER_AUTH_H
#define __ASM_ASM_POINTER_AUTH_H
#include <asm/alternative.h>
#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/sysreg.h>
#ifdef CONFIG_ARM64_PTR_AUTH
/*
 * The offset of thread.keys_user.ap* exceeds the ldp #imm offset range,
 * so compute the base address of thread.keys_user first and use the
 * individual ap* key offsets relative to it.
 */
.macro ptrauth_keys_install_user tsk, tmp1, tmp2, tmp3
mov \tmp1, #THREAD_KEYS_USER
add \tmp1, \tsk, \tmp1
alternative_if_not ARM64_HAS_ADDRESS_AUTH
b .Laddr_auth_skip_\@
alternative_else_nop_endif
ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APIA]
msr_s SYS_APIAKEYLO_EL1, \tmp2
msr_s SYS_APIAKEYHI_EL1, \tmp3
ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APIB]
msr_s SYS_APIBKEYLO_EL1, \tmp2
msr_s SYS_APIBKEYHI_EL1, \tmp3
ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APDA]
msr_s SYS_APDAKEYLO_EL1, \tmp2
msr_s SYS_APDAKEYHI_EL1, \tmp3
ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APDB]
msr_s SYS_APDBKEYLO_EL1, \tmp2
msr_s SYS_APDBKEYHI_EL1, \tmp3
.Laddr_auth_skip_\@:
alternative_if ARM64_HAS_GENERIC_AUTH
ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APGA]
msr_s SYS_APGAKEYLO_EL1, \tmp2
msr_s SYS_APGAKEYHI_EL1, \tmp3
alternative_else_nop_endif
.endm
.macro ptrauth_keys_install_kernel tsk, sync, tmp1, tmp2, tmp3
alternative_if ARM64_HAS_ADDRESS_AUTH
mov \tmp1, #THREAD_KEYS_KERNEL
add \tmp1, \tsk, \tmp1
ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_KERNEL_KEY_APIA]
msr_s SYS_APIAKEYLO_EL1, \tmp2
msr_s SYS_APIAKEYHI_EL1, \tmp3
.if \sync == 1
isb
.endif
alternative_else_nop_endif
.endm
#else /* CONFIG_ARM64_PTR_AUTH */
.macro ptrauth_keys_install_user tsk, tmp1, tmp2, tmp3
.endm
.macro ptrauth_keys_install_kernel tsk, sync, tmp1, tmp2, tmp3
.endm
#endif /* CONFIG_ARM64_PTR_AUTH */
#endif /* __ASM_ASM_POINTER_AUTH_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_COMPILER_H
#define __ASM_COMPILER_H
#if defined(CONFIG_ARM64_PTR_AUTH)
/*
* The EL0/EL1 pointer bits used by a pointer authentication code.
* This is dependent on TBI0/TBI1 being enabled, or bits 63:56 would also apply.
*/
#define ptrauth_user_pac_mask() GENMASK_ULL(54, vabits_actual)
#define ptrauth_kernel_pac_mask() GENMASK_ULL(63, vabits_actual)
/* Valid for EL0 TTBR0 and EL1 TTBR1 instruction pointers */
#define ptrauth_clear_pac(ptr) \
((ptr & BIT_ULL(55)) ? (ptr | ptrauth_kernel_pac_mask()) : \
(ptr & ~ptrauth_user_pac_mask()))
#define __builtin_return_address(val) \
(void *)(ptrauth_clear_pac((unsigned long)__builtin_return_address(val)))
#endif /* CONFIG_ARM64_PTR_AUTH */
#endif /* __ASM_COMPILER_H */
@@ -59,7 +59,9 @@
 #define ARM64_HAS_E0PD				49
 #define ARM64_HAS_RNG				50
 #define ARM64_HAS_AMU_EXTN			51
+#define ARM64_HAS_ADDRESS_AUTH			52
+#define ARM64_HAS_GENERIC_AUTH			53

-#define ARM64_NCAPS				52
+#define ARM64_NCAPS				54

 #endif /* __ASM_CPUCAPS_H */
@@ -208,6 +208,10 @@ extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
  * In some non-typical cases either both (a) and (b), or neither,
  * should be permitted. This can be described by including neither
  * or both flags in the capability's type field.
+ *
+ * In case of a conflict, the CPU is prevented from booting. If the
+ * ARM64_CPUCAP_PANIC_ON_CONFLICT flag is specified for the capability,
+ * then a kernel panic is triggered.
  */
@@ -240,6 +244,8 @@ extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
 #define ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU	((u16)BIT(4))
 /* Is it safe for a late CPU to miss this capability when system has it */
 #define ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU	((u16)BIT(5))
+/* Panic when a conflict is detected */
+#define ARM64_CPUCAP_PANIC_ON_CONFLICT		((u16)BIT(6))

 /*
  * CPU errata workarounds that need to be enabled at boot time if one or
@@ -279,9 +285,20 @@ extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
 /*
  * CPU feature used early in the boot based on the boot CPU. All secondary
- * CPUs must match the state of the capability as detected by the boot CPU.
+ * CPUs must match the state of the capability as detected by the boot CPU. In
+ * case of a conflict, a kernel panic is triggered.
+ */
+#define ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE		\
+	(ARM64_CPUCAP_SCOPE_BOOT_CPU | ARM64_CPUCAP_PANIC_ON_CONFLICT)
+
+/*
+ * CPU feature used early in the boot based on the boot CPU. It is safe for a
+ * late CPU to have this feature even though the boot CPU hasn't enabled it,
+ * although the feature will not be used by Linux in this case. If the boot CPU
+ * has enabled this feature already, then every late CPU must have it.
  */
-#define ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE	ARM64_CPUCAP_SCOPE_BOOT_CPU
+#define ARM64_CPUCAP_BOOT_CPU_FEATURE			\
+	(ARM64_CPUCAP_SCOPE_BOOT_CPU | ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)

 struct arm64_cpu_capabilities {
 	const char *desc;
@@ -340,18 +357,6 @@ static inline int cpucap_default_scope(const struct arm64_cpu_capabilities *cap)
 	return cap->type & ARM64_CPUCAP_SCOPE_MASK;
 }

-static inline bool
-cpucap_late_cpu_optional(const struct arm64_cpu_capabilities *cap)
-{
-	return !!(cap->type & ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU);
-}
-
-static inline bool
-cpucap_late_cpu_permitted(const struct arm64_cpu_capabilities *cap)
-{
-	return !!(cap->type & ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU);
-}
-
 /*
  * Generic helper for handling capabilities with multiple (match,enable) pairs
  * of callbacks, sharing the same capability bit.
@@ -654,15 +659,13 @@ static inline bool system_supports_cnp(void)
 static inline bool system_supports_address_auth(void)
 {
 	return IS_ENABLED(CONFIG_ARM64_PTR_AUTH) &&
-	       (cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH_ARCH) ||
-		cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH_IMP_DEF));
+	       cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH);
 }

 static inline bool system_supports_generic_auth(void)
 {
 	return IS_ENABLED(CONFIG_ARM64_PTR_AUTH) &&
-	       (cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH_ARCH) ||
-		cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH_IMP_DEF));
+	       cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH);
 }

 static inline bool system_uses_irq_prio_masking(void)
......
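The composite capability types above encode different late-CPU conflict policies. A minimal model of the resulting decision (illustrative standalone C; the flag values mirror the BIT() encoding shown in the hunk, and the real logic lives in verify_local_cpu_caps() further down):

#include <stdbool.h>
#include <stdint.h>

/* stand-ins for the ARM64_CPUCAP_* bits shown above */
#define CAP_PERMITTED_FOR_LATE_CPU	(1u << 4)
#define CAP_OPTIONAL_FOR_LATE_CPU	(1u << 5)
#define CAP_PANIC_ON_CONFLICT		(1u << 6)

/*
 * A late CPU conflicts with the established system state when:
 *  - the system has the capability but the CPU lacks it, and the
 *    capability is not OPTIONAL_FOR_LATE_CPU; or
 *  - the CPU has the capability but the system does not, and the
 *    capability is not PERMITTED_FOR_LATE_CPU.
 * On conflict, PANIC_ON_CONFLICT escalates from parking the CPU
 * (cpu_die_early) to a kernel panic (cpu_panic_kernel).
 */
static bool late_cpu_conflicts(uint16_t type, bool cpu_has, bool system_has)
{
	if (system_has && !cpu_has)
		return !(type & CAP_OPTIONAL_FOR_LATE_CPU);
	if (cpu_has && !system_has)
		return !(type & CAP_PERMITTED_FOR_LATE_CPU);
	return false;			/* CPU and system agree */
}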
@@ -22,7 +22,7 @@ struct ptrauth_key {
  * We give each process its own keys, which are shared by all threads. The keys
  * are inherited upon fork(), and reinitialised upon exec*().
  */
-struct ptrauth_keys {
+struct ptrauth_keys_user {
 	struct ptrauth_key apia;
 	struct ptrauth_key apib;
 	struct ptrauth_key apda;
@@ -30,7 +30,11 @@ struct ptrauth_keys {
 	struct ptrauth_key apga;
 };

-static inline void ptrauth_keys_init(struct ptrauth_keys *keys)
+struct ptrauth_keys_kernel {
+	struct ptrauth_key apia;
+};
+
+static inline void ptrauth_keys_init_user(struct ptrauth_keys_user *keys)
 {
 	if (system_supports_address_auth()) {
 		get_random_bytes(&keys->apia, sizeof(keys->apia));
@@ -50,48 +54,38 @@ do {								\
 	write_sysreg_s(__pki_v.hi, SYS_ ## k ## KEYHI_EL1);	\
 } while (0)

-static inline void ptrauth_keys_switch(struct ptrauth_keys *keys)
-{
-	if (system_supports_address_auth()) {
-		__ptrauth_key_install(APIA, keys->apia);
-		__ptrauth_key_install(APIB, keys->apib);
-		__ptrauth_key_install(APDA, keys->apda);
-		__ptrauth_key_install(APDB, keys->apdb);
-	}
-
-	if (system_supports_generic_auth())
-		__ptrauth_key_install(APGA, keys->apga);
-}
+static __always_inline void ptrauth_keys_init_kernel(struct ptrauth_keys_kernel *keys)
+{
+	if (system_supports_address_auth())
+		get_random_bytes(&keys->apia, sizeof(keys->apia));
+}
+
+static __always_inline void ptrauth_keys_switch_kernel(struct ptrauth_keys_kernel *keys)
+{
+	if (system_supports_address_auth())
+		__ptrauth_key_install(APIA, keys->apia);
+}

 extern int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg);

-/*
- * The EL0 pointer bits used by a pointer authentication code.
- * This is dependent on TBI0 being enabled, or bits 63:56 would also apply.
- */
-#define ptrauth_user_pac_mask()	GENMASK(54, vabits_actual)
-
-/* Only valid for EL0 TTBR0 instruction pointers */
 static inline unsigned long ptrauth_strip_insn_pac(unsigned long ptr)
 {
-	return ptr & ~ptrauth_user_pac_mask();
+	return ptrauth_clear_pac(ptr);
 }

-#define ptrauth_thread_init_user(tsk)					\
-do {									\
-	struct task_struct *__ptiu_tsk = (tsk);				\
-	ptrauth_keys_init(&__ptiu_tsk->thread.keys_user);		\
-	ptrauth_keys_switch(&__ptiu_tsk->thread.keys_user);		\
-} while (0)
-
-#define ptrauth_thread_switch(tsk)					\
-	ptrauth_keys_switch(&(tsk)->thread.keys_user)
+#define ptrauth_thread_init_user(tsk)					\
+	ptrauth_keys_init_user(&(tsk)->thread.keys_user)
+#define ptrauth_thread_init_kernel(tsk)					\
+	ptrauth_keys_init_kernel(&(tsk)->thread.keys_kernel)
+#define ptrauth_thread_switch_kernel(tsk)				\
+	ptrauth_keys_switch_kernel(&(tsk)->thread.keys_kernel)

 #else /* CONFIG_ARM64_PTR_AUTH */

 #define ptrauth_prctl_reset_keys(tsk, arg)	(-EINVAL)
 #define ptrauth_strip_insn_pac(lr)	(lr)
 #define ptrauth_thread_init_user(tsk)
-#define ptrauth_thread_switch(tsk)
+#define ptrauth_thread_init_kernel(tsk)
+#define ptrauth_thread_switch_kernel(tsk)

 #endif /* CONFIG_ARM64_PTR_AUTH */

 #endif /* __ASM_POINTER_AUTH_H */
@@ -146,7 +146,8 @@ struct thread_struct {
 	unsigned long		fault_code;	/* ESR_EL1 value */
 	struct debug_info	debug;		/* debugging */
 #ifdef CONFIG_ARM64_PTR_AUTH
-	struct ptrauth_keys	keys_user;
+	struct ptrauth_keys_user	keys_user;
+	struct ptrauth_keys_kernel	keys_kernel;
 #endif
 };
......
@@ -23,6 +23,14 @@
 #define CPU_STUCK_REASON_52_BIT_VA	(UL(1) << CPU_STUCK_REASON_SHIFT)
 #define CPU_STUCK_REASON_NO_GRAN	(UL(2) << CPU_STUCK_REASON_SHIFT)

+/* Possible options for __cpu_setup */
+/* Option to setup primary cpu */
+#define ARM64_CPU_BOOT_PRIMARY		(1)
+/* Option to setup secondary cpus */
+#define ARM64_CPU_BOOT_SECONDARY	(2)
+/* Option to setup cpus for different cpu run time services */
+#define ARM64_CPU_RUNTIME		(3)
+
 #ifndef __ASSEMBLY__

 #include <asm/percpu.h>
@@ -30,6 +38,7 @@
 #include <linux/threads.h>
 #include <linux/cpumask.h>
 #include <linux/thread_info.h>
+#include <asm/pointer_auth.h>

 DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
@@ -87,6 +96,9 @@ asmlinkage void secondary_start_kernel(void);
 struct secondary_data {
 	void *stack;
 	struct task_struct *task;
+#ifdef CONFIG_ARM64_PTR_AUTH
+	struct ptrauth_keys_kernel ptrauth_key;
+#endif
 	long status;
 };
......
@@ -15,6 +15,7 @@
 #include <linux/random.h>
 #include <linux/version.h>
+#include <asm/pointer_auth.h>

 extern unsigned long __stack_chk_guard;
@@ -26,6 +27,7 @@ extern unsigned long __stack_chk_guard;
  */
 static __always_inline void boot_init_stack_canary(void)
 {
+#if defined(CONFIG_STACKPROTECTOR)
 	unsigned long canary;

 	/* Try to get a semi random initial value. */
@@ -36,6 +38,9 @@ static __always_inline void boot_init_stack_canary(void)
 	current->stack_canary = canary;
 	if (!IS_ENABLED(CONFIG_STACKPROTECTOR_PER_TASK))
 		__stack_chk_guard = current->stack_canary;
+#endif
+	ptrauth_thread_init_kernel(current);
+	ptrauth_thread_switch_kernel(current);
 }

 #endif	/* _ASM_STACKPROTECTOR_H */
@@ -40,6 +40,10 @@ int main(void)
 #endif
   BLANK();
   DEFINE(THREAD_CPU_CONTEXT,	offsetof(struct task_struct, thread.cpu_context));
+#ifdef CONFIG_ARM64_PTR_AUTH
+  DEFINE(THREAD_KEYS_USER,	offsetof(struct task_struct, thread.keys_user));
+  DEFINE(THREAD_KEYS_KERNEL,	offsetof(struct task_struct, thread.keys_kernel));
+#endif
   BLANK();
   DEFINE(S_X0,			offsetof(struct pt_regs, regs[0]));
   DEFINE(S_X2,			offsetof(struct pt_regs, regs[2]));
@@ -88,6 +92,9 @@ int main(void)
   BLANK();
   DEFINE(CPU_BOOT_STACK,	offsetof(struct secondary_data, stack));
   DEFINE(CPU_BOOT_TASK,		offsetof(struct secondary_data, task));
+#ifdef CONFIG_ARM64_PTR_AUTH
+  DEFINE(CPU_BOOT_PTRAUTH_KEY,	offsetof(struct secondary_data, ptrauth_key));
+#endif
   BLANK();
 #ifdef CONFIG_KVM_ARM_HOST
   DEFINE(VCPU_CONTEXT,		offsetof(struct kvm_vcpu, arch.ctxt));
@@ -127,6 +134,15 @@ int main(void)
 #ifdef CONFIG_ARM_SDE_INTERFACE
   DEFINE(SDEI_EVENT_INTREGS,	offsetof(struct sdei_registered_event, interrupted_regs));
   DEFINE(SDEI_EVENT_PRIORITY,	offsetof(struct sdei_registered_event, priority));
 #endif
+#ifdef CONFIG_ARM64_PTR_AUTH
+  DEFINE(PTRAUTH_USER_KEY_APIA,		offsetof(struct ptrauth_keys_user, apia));
+  DEFINE(PTRAUTH_USER_KEY_APIB,		offsetof(struct ptrauth_keys_user, apib));
+  DEFINE(PTRAUTH_USER_KEY_APDA,		offsetof(struct ptrauth_keys_user, apda));
+  DEFINE(PTRAUTH_USER_KEY_APDB,		offsetof(struct ptrauth_keys_user, apdb));
+  DEFINE(PTRAUTH_USER_KEY_APGA,		offsetof(struct ptrauth_keys_user, apga));
+  DEFINE(PTRAUTH_KERNEL_KEY_APIA,	offsetof(struct ptrauth_keys_kernel, apia));
+  BLANK();
+#endif
   return 0;
 }
@@ -116,6 +116,8 @@ cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused)
 static void cpu_enable_cnp(struct arm64_cpu_capabilities const *cap);

+static bool __system_matches_cap(unsigned int n);
+
 /*
  * NOTE: Any changes to the visibility of features should be kept in
  * sync with the documentation of the CPU feature register ABI.
@@ -1368,10 +1370,18 @@ static void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused)
 #endif /* CONFIG_ARM64_RAS_EXTN */

 #ifdef CONFIG_ARM64_PTR_AUTH
-static void cpu_enable_address_auth(struct arm64_cpu_capabilities const *cap)
+static bool has_address_auth(const struct arm64_cpu_capabilities *entry,
+			     int __unused)
+{
+	return __system_matches_cap(ARM64_HAS_ADDRESS_AUTH_ARCH) ||
+	       __system_matches_cap(ARM64_HAS_ADDRESS_AUTH_IMP_DEF);
+}
+
+static bool has_generic_auth(const struct arm64_cpu_capabilities *entry,
+			     int __unused)
 {
-	sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_ENIA | SCTLR_ELx_ENIB |
-				       SCTLR_ELx_ENDA | SCTLR_ELx_ENDB);
+	return __system_matches_cap(ARM64_HAS_GENERIC_AUTH_ARCH) ||
+	       __system_matches_cap(ARM64_HAS_GENERIC_AUTH_IMP_DEF);
 }
 #endif /* CONFIG_ARM64_PTR_AUTH */

@@ -1399,6 +1409,25 @@ static bool can_use_gic_priorities(const struct arm64_cpu_capabilities *entry,
 }
 #endif

+/* Internal helper functions to match cpu capability type */
+static bool
+cpucap_late_cpu_optional(const struct arm64_cpu_capabilities *cap)
+{
+	return !!(cap->type & ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU);
+}
+
+static bool
+cpucap_late_cpu_permitted(const struct arm64_cpu_capabilities *cap)
+{
+	return !!(cap->type & ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU);
+}
+
+static bool
+cpucap_panic_on_conflict(const struct arm64_cpu_capabilities *cap)
+{
+	return !!(cap->type & ARM64_CPUCAP_PANIC_ON_CONFLICT);
+}
+
 static const struct arm64_cpu_capabilities arm64_features[] = {
 	{
 		.desc = "GIC system register CPU interface",
@@ -1662,24 +1691,27 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 	{
 		.desc = "Address authentication (architected algorithm)",
 		.capability = ARM64_HAS_ADDRESS_AUTH_ARCH,
-		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
+		.type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
 		.sys_reg = SYS_ID_AA64ISAR1_EL1,
 		.sign = FTR_UNSIGNED,
 		.field_pos = ID_AA64ISAR1_APA_SHIFT,
 		.min_field_value = ID_AA64ISAR1_APA_ARCHITECTED,
 		.matches = has_cpuid_feature,
-		.cpu_enable = cpu_enable_address_auth,
 	},
 	{
 		.desc = "Address authentication (IMP DEF algorithm)",
 		.capability = ARM64_HAS_ADDRESS_AUTH_IMP_DEF,
-		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
+		.type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
 		.sys_reg = SYS_ID_AA64ISAR1_EL1,
 		.sign = FTR_UNSIGNED,
 		.field_pos = ID_AA64ISAR1_API_SHIFT,
 		.min_field_value = ID_AA64ISAR1_API_IMP_DEF,
 		.matches = has_cpuid_feature,
-		.cpu_enable = cpu_enable_address_auth,
+	},
+	{
+		.capability = ARM64_HAS_ADDRESS_AUTH,
+		.type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
+		.matches = has_address_auth,
 	},
 	{
 		.desc = "Generic authentication (architected algorithm)",
@@ -1701,6 +1733,11 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.min_field_value = ID_AA64ISAR1_GPI_IMP_DEF,
 		.matches = has_cpuid_feature,
 	},
+	{
+		.capability = ARM64_HAS_GENERIC_AUTH,
+		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
+		.matches = has_generic_auth,
+	},
 #endif /* CONFIG_ARM64_PTR_AUTH */
 #ifdef CONFIG_ARM64_PSEUDO_NMI
 	{
@@ -2050,10 +2087,8 @@ static void __init enable_cpu_capabilities(u16 scope_mask)
  * Run through the list of capabilities to check for conflicts.
  * If the system has already detected a capability, take necessary
  * action on this CPU.
- *
- * Returns "false" on conflicts.
  */
-static bool verify_local_cpu_caps(u16 scope_mask)
+static void verify_local_cpu_caps(u16 scope_mask)
 {
 	int i;
 	bool cpu_has_cap, system_has_cap;
@@ -2098,10 +2133,12 @@ static void verify_local_cpu_caps(u16 scope_mask)
 			pr_crit("CPU%d: Detected conflict for capability %d (%s), System: %d, CPU: %d\n",
 				smp_processor_id(), caps->capability,
 				caps->desc, system_has_cap, cpu_has_cap);
-			return false;
-		}

-	return true;
+			if (cpucap_panic_on_conflict(caps))
+				cpu_panic_kernel();
+			else
+				cpu_die_early();
+		}
 }

 /*
@@ -2111,12 +2148,8 @@ static void verify_local_cpu_caps(u16 scope_mask)
 static void check_early_cpu_features(void)
 {
 	verify_cpu_asid_bits();
-
-	/*
-	 * Early features are used by the kernel already. If there
-	 * is a conflict, we cannot proceed further.
-	 */
-	if (!verify_local_cpu_caps(SCOPE_BOOT_CPU))
-		cpu_panic_kernel();
+	verify_local_cpu_caps(SCOPE_BOOT_CPU);
 }

 static void
@@ -2164,8 +2197,7 @@ static void verify_local_cpu_capabilities(void)
 	 * check_early_cpu_features(), as they need to be verified
 	 * on all secondary CPUs.
 	 */
-	if (!verify_local_cpu_caps(SCOPE_ALL & ~SCOPE_BOOT_CPU))
-		cpu_die_early();
+	verify_local_cpu_caps(SCOPE_ALL & ~SCOPE_BOOT_CPU);

 	verify_local_elf_hwcaps(arm64_elf_hwcaps);
@@ -2216,6 +2248,23 @@ bool this_cpu_has_cap(unsigned int n)
 	return false;
 }

+/*
+ * This helper function is used in a narrow window when,
+ * - The system wide safe registers are set with all the SMP CPUs and,
+ * - The SYSTEM_FEATURE cpu_hwcaps may not have been set.
+ * In all other cases cpus_have_{const_}cap() should be used.
+ */
+static bool __system_matches_cap(unsigned int n)
+{
+	if (n < ARM64_NCAPS) {
+		const struct arm64_cpu_capabilities *cap = cpu_hwcaps_ptrs[n];
+
+		if (cap)
+			return cap->matches(cap, SCOPE_SYSTEM);
+	}
+	return false;
+}
+
 void cpu_set_feature(unsigned int num)
 {
 	WARN_ON(num >= MAX_CPU_FEATURES);
@@ -2288,7 +2337,7 @@ void __init setup_cpu_features(void)
 static bool __maybe_unused
 cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused)
 {
-	return (cpus_have_const_cap(ARM64_HAS_PAN) && !cpus_have_const_cap(ARM64_HAS_UAO));
+	return (__system_matches_cap(ARM64_HAS_PAN) && !__system_matches_cap(ARM64_HAS_UAO));
 }

 static void __maybe_unused cpu_enable_cnp(struct arm64_cpu_capabilities const *cap)
......
@@ -14,6 +14,7 @@
 #include <asm/alternative.h>
 #include <asm/assembler.h>
 #include <asm/asm-offsets.h>
+#include <asm/asm_pointer_auth.h>
 #include <asm/cpufeature.h>
 #include <asm/errno.h>
 #include <asm/esr.h>
@@ -177,6 +178,7 @@ alternative_cb_end

 	apply_ssbd 1, x22, x23

+	ptrauth_keys_install_kernel tsk, 1, x20, x22, x23
 	.else
 	add	x21, sp, #S_FRAME_SIZE
 	get_current_task tsk
@@ -341,6 +343,9 @@ alternative_else_nop_endif
 	msr	cntkctl_el1, x1
 4:
 #endif
+	/* No kernel C function calls after this as user keys are set. */
+	ptrauth_keys_install_user tsk, x0, x1, x2
+
 	apply_ssbd 0, x0, x1
 	.endif

@@ -895,6 +900,7 @@ SYM_FUNC_START(cpu_switch_to)
 	ldr	lr, [x8]
 	mov	sp, x9
 	msr	sp_el0, x1
+	ptrauth_keys_install_kernel x1, 1, x8, x9, x10
 	ret
 SYM_FUNC_END(cpu_switch_to)
 NOKPROBE(cpu_switch_to)
......
@@ -118,6 +118,7 @@ SYM_CODE_START(stext)
 	 * On return, the CPU will be ready for the MMU to be turned on and
 	 * the TCR will have been set.
 	 */
+	mov	x0, #ARM64_CPU_BOOT_PRIMARY
 	bl	__cpu_setup			// initialise processor
 	b	__primary_switch
 SYM_CODE_END(stext)
@@ -716,6 +717,7 @@ SYM_FUNC_START_LOCAL(secondary_startup)
 	 * Common entry point for secondary CPUs.
 	 */
 	bl	__cpu_secondary_check52bitva
+	mov	x0, #ARM64_CPU_BOOT_SECONDARY
 	bl	__cpu_setup			// initialise processor
 	adrp	x1, swapper_pg_dir
 	bl	__enable_mmu
......
@@ -9,7 +9,7 @@

 int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg)
 {
-	struct ptrauth_keys *keys = &tsk->thread.keys_user;
+	struct ptrauth_keys_user *keys = &tsk->thread.keys_user;
 	unsigned long addr_key_mask = PR_PAC_APIAKEY | PR_PAC_APIBKEY |
 				      PR_PAC_APDAKEY | PR_PAC_APDBKEY;
 	unsigned long key_mask = addr_key_mask | PR_PAC_APGAKEY;
@@ -18,8 +18,7 @@ int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg)
 		return -EINVAL;

 	if (!arg) {
-		ptrauth_keys_init(keys);
-		ptrauth_keys_switch(keys);
+		ptrauth_keys_init_user(keys);
 		return 0;
 	}
@@ -41,7 +40,5 @@ int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg)
 	if (arg & PR_PAC_APGAKEY)
 		get_random_bytes(&keys->apga, sizeof(keys->apga));

-	ptrauth_keys_switch(keys);
-
 	return 0;
 }
@@ -262,7 +262,7 @@ void __show_regs(struct pt_regs *regs)

 	if (!user_mode(regs)) {
 		printk("pc : %pS\n", (void *)regs->pc);
-		printk("lr : %pS\n", (void *)lr);
+		printk("lr : %pS\n", (void *)ptrauth_strip_insn_pac(lr));
 	} else {
 		printk("pc : %016llx\n", regs->pc);
 		printk("lr : %016llx\n", lr);
@@ -376,6 +376,8 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long stack_start,
 	 */
 	fpsimd_flush_task_state(p);

+	ptrauth_thread_init_kernel(p);
+
 	if (likely(!(p->flags & PF_KTHREAD))) {
 		*childregs = *current_pt_regs();
 		childregs->regs[0] = 0;
@@ -512,7 +514,6 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
 	contextidr_thread_switch(next);
 	entry_task_switch(next);
 	uao_thread_switch(next);
-	ptrauth_thread_switch(next);
 	ssbs_thread_switch(next);

 	/*
......
@@ -999,7 +999,7 @@ static struct ptrauth_key pac_key_from_user(__uint128_t ukey)
 }

 static void pac_address_keys_to_user(struct user_pac_address_keys *ukeys,
-				     const struct ptrauth_keys *keys)
+				     const struct ptrauth_keys_user *keys)
 {
 	ukeys->apiakey = pac_key_to_user(&keys->apia);
 	ukeys->apibkey = pac_key_to_user(&keys->apib);
@@ -1007,7 +1007,7 @@ static void pac_address_keys_to_user(struct user_pac_address_keys *ukeys,
 	ukeys->apdbkey = pac_key_to_user(&keys->apdb);
 }

-static void pac_address_keys_from_user(struct ptrauth_keys *keys,
+static void pac_address_keys_from_user(struct ptrauth_keys_user *keys,
 				       const struct user_pac_address_keys *ukeys)
 {
 	keys->apia = pac_key_from_user(ukeys->apiakey);
@@ -1021,7 +1021,7 @@ static int pac_address_keys_get(struct task_struct *target,
 				unsigned int pos, unsigned int count,
 				void *kbuf, void __user *ubuf)
 {
-	struct ptrauth_keys *keys = &target->thread.keys_user;
+	struct ptrauth_keys_user *keys = &target->thread.keys_user;
 	struct user_pac_address_keys user_keys;

 	if (!system_supports_address_auth())
@@ -1038,7 +1038,7 @@ static int pac_address_keys_set(struct task_struct *target,
 				unsigned int pos, unsigned int count,
 				const void *kbuf, const void __user *ubuf)
 {
-	struct ptrauth_keys *keys = &target->thread.keys_user;
+	struct ptrauth_keys_user *keys = &target->thread.keys_user;
 	struct user_pac_address_keys user_keys;
 	int ret;
@@ -1056,12 +1056,12 @@ static int pac_address_keys_set(struct task_struct *target,
 }

 static void pac_generic_keys_to_user(struct user_pac_generic_keys *ukeys,
-				     const struct ptrauth_keys *keys)
+				     const struct ptrauth_keys_user *keys)
 {
 	ukeys->apgakey = pac_key_to_user(&keys->apga);
 }

-static void pac_generic_keys_from_user(struct ptrauth_keys *keys,
+static void pac_generic_keys_from_user(struct ptrauth_keys_user *keys,
 				       const struct user_pac_generic_keys *ukeys)
 {
 	keys->apga = pac_key_from_user(ukeys->apgakey);
@@ -1072,7 +1072,7 @@ static int pac_generic_keys_get(struct task_struct *target,
 				unsigned int pos, unsigned int count,
 				void *kbuf, void __user *ubuf)
 {
-	struct ptrauth_keys *keys = &target->thread.keys_user;
+	struct ptrauth_keys_user *keys = &target->thread.keys_user;
 	struct user_pac_generic_keys user_keys;

 	if (!system_supports_generic_auth())
@@ -1089,7 +1089,7 @@ static int pac_generic_keys_set(struct task_struct *target,
 				unsigned int pos, unsigned int count,
 				const void *kbuf, const void __user *ubuf)
 {
-	struct ptrauth_keys *keys = &target->thread.keys_user;
+	struct ptrauth_keys_user *keys = &target->thread.keys_user;
 	struct user_pac_generic_keys user_keys;
 	int ret;
......
@@ -3,6 +3,7 @@
 #include <linux/linkage.h>
 #include <asm/asm-offsets.h>
 #include <asm/assembler.h>
+#include <asm/smp.h>

 	.text
 /*
@@ -99,6 +100,7 @@ ENDPROC(__cpu_suspend_enter)
 	.pushsection ".idmap.text", "awx"
 ENTRY(cpu_resume)
 	bl	el2_setup		// if in EL2 drop to EL1 cleanly
+	mov	x0, #ARM64_CPU_RUNTIME
 	bl	__cpu_setup
 	/* enable the MMU early - so we can access sleep_save_stash by va */
 	adrp	x1, swapper_pg_dir
......
@@ -114,6 +114,10 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
 	 */
 	secondary_data.task = idle;
 	secondary_data.stack = task_stack_page(idle) + THREAD_SIZE;
+#if defined(CONFIG_ARM64_PTR_AUTH)
+	secondary_data.ptrauth_key.apia.lo = idle->thread.keys_kernel.apia.lo;
+	secondary_data.ptrauth_key.apia.hi = idle->thread.keys_kernel.apia.hi;
+#endif
 	update_cpu_boot_status(CPU_MMU_OFF);
 	__flush_dcache_area(&secondary_data, sizeof(secondary_data));
@@ -136,6 +140,10 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
 		pr_crit("CPU%u: failed to come online\n", cpu);
 		secondary_data.task = NULL;
 		secondary_data.stack = NULL;
+#if defined(CONFIG_ARM64_PTR_AUTH)
+		secondary_data.ptrauth_key.apia.lo = 0;
+		secondary_data.ptrauth_key.apia.hi = 0;
+#endif
 		__flush_dcache_area(&secondary_data, sizeof(secondary_data));
 		status = READ_ONCE(secondary_data.status);
 		if (status == CPU_MMU_OFF)
......
@@ -14,6 +14,7 @@
 #include <linux/stacktrace.h>

 #include <asm/irq.h>
+#include <asm/pointer_auth.h>
 #include <asm/stack_pointer.h>
 #include <asm/stacktrace.h>
@@ -86,7 +87,7 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)

 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	if (tsk->ret_stack &&
-		(frame->pc == (unsigned long)return_to_handler)) {
+		(ptrauth_strip_insn_pac(frame->pc) == (unsigned long)return_to_handler)) {
 		struct ftrace_ret_stack *ret_stack;
 		/*
 		 * This is a case where function graph tracer has
@@ -101,6 +102,8 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
 	}
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */

+	frame->pc = ptrauth_strip_insn_pac(frame->pc);
+
 	/*
 	 * Frames created upon entry from EL0 have NULL FP and PC values, so
 	 * don't bother reporting these. Frames created by __noreturn functions
......
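To see why the unwinder strips the PAC both before the ftrace comparison and before reporting: a return address read off the stack still carries PAC bits in its upper bits, so it no longer equals the plain function address. A small standalone sketch (hypothetical addresses, 48-bit VAs assumed):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t return_to_handler = 0xffff800010a01234ULL; /* hypothetical */

	/* the same address as saved on the stack: PAC in [63:48], bit 55 kept */
	uint64_t low48 = return_to_handler & ((1ULL << 48) - 1);
	uint64_t frame_pc = low48 | (0x2aULL << 48) | (1ULL << 55);

	printf("raw compare:      %d\n", frame_pc == return_to_handler); /* 0 */

	/* strip: bit 55 says kernel VA, so refill the PAC bits with ones */
	uint64_t stripped = frame_pc | 0xffff000000000000ULL;
	printf("stripped compare: %d\n", stripped == return_to_handler); /* 1 */
	return 0;
}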
@@ -11,11 +11,13 @@
 #include <linux/linkage.h>
 #include <asm/assembler.h>
 #include <asm/asm-offsets.h>
+#include <asm/asm_pointer_auth.h>
 #include <asm/hwcap.h>
 #include <asm/pgtable.h>
 #include <asm/pgtable-hwdef.h>
 #include <asm/cpufeature.h>
 #include <asm/alternative.h>
+#include <asm/smp.h>

 #ifdef CONFIG_ARM64_64K_PAGES
 #define TCR_TG_FLAGS	TCR_TG0_64K | TCR_TG1_64K
@@ -137,6 +139,7 @@ alternative_if ARM64_HAS_RAS_EXTN
 	msr_s	SYS_DISR_EL1, xzr
 alternative_else_nop_endif

+	ptrauth_keys_install_kernel x14, 0, x1, x2, x3
 	isb
 	ret
 SYM_FUNC_END(cpu_do_resume)
@@ -381,32 +384,32 @@ SYM_FUNC_END(idmap_kpti_install_ng_mappings)
 /*
  *	__cpu_setup
  *
- *	Initialise the processor for turning the MMU on. Return in x0 the
- *	value of the SCTLR_EL1 register.
+ *	Initialise the processor for turning the MMU on.
+ *
+ *	Input:
+ *	  x0 with a flag ARM64_CPU_BOOT_PRIMARY/ARM64_CPU_BOOT_SECONDARY/ARM64_CPU_RUNTIME.
+ *	Output:
+ *	  Return in x0 the value of the SCTLR_EL1 register.
 */
 	.pushsection ".idmap.text", "awx"
 SYM_FUNC_START(__cpu_setup)
 	tlbi	vmalle1				// Invalidate local TLB
 	dsb	nsh

-	mov	x0, #3 << 20
-	msr	cpacr_el1, x0			// Enable FP/ASIMD
-	mov	x0, #1 << 12			// Reset mdscr_el1 and disable
-	msr	mdscr_el1, x0			// access to the DCC from EL0
+	mov	x1, #3 << 20
+	msr	cpacr_el1, x1			// Enable FP/ASIMD
+	mov	x1, #1 << 12			// Reset mdscr_el1 and disable
+	msr	mdscr_el1, x1			// access to the DCC from EL0
 	isb					// Unmask debug exceptions now,
 	enable_dbg				// since this is per-cpu
-	reset_pmuserenr_el0 x0			// Disable PMU access from EL0
-	reset_amuserenr_el0 x0			// Disable AMU access from EL0
+	reset_pmuserenr_el0 x1			// Disable PMU access from EL0
+	reset_amuserenr_el0 x1			// Disable AMU access from EL0

 	/*
 	 * Memory region attributes
 	 */
 	mov_q	x5, MAIR_EL1_SET
 	msr	mair_el1, x5
-	/*
-	 * Prepare SCTLR
-	 */
-	mov_q	x0, SCTLR_EL1_SET
 	/*
 	 * Set/prepare TCR and TTBR. We use 512GB (39-bit) address range for
 	 * both user and kernel.
@@ -443,5 +446,51 @@ SYM_FUNC_START(__cpu_setup)
 1:
 #endif	/* CONFIG_ARM64_HW_AFDBM */
 	msr	tcr_el1, x10
+	mov	x1, x0
+	/*
+	 * Prepare SCTLR
+	 */
+	mov_q	x0, SCTLR_EL1_SET
+
+#ifdef CONFIG_ARM64_PTR_AUTH
+	/* No ptrauth setup for run time cpus */
+	cmp	x1, #ARM64_CPU_RUNTIME
+	b.eq	3f
+
+	/* Check if the CPU supports ptrauth */
+	mrs	x2, id_aa64isar1_el1
+	ubfx	x2, x2, #ID_AA64ISAR1_APA_SHIFT, #8
+	cbz	x2, 3f
+
+	/*
+	 * The primary cpu keys are reset here and can be
+	 * re-initialised with some proper values later.
+	 */
+	msr_s	SYS_APIAKEYLO_EL1, xzr
+	msr_s	SYS_APIAKEYHI_EL1, xzr
+
+	/* Just enable ptrauth for primary cpu */
+	cmp	x1, #ARM64_CPU_BOOT_PRIMARY
+	b.eq	2f
+
+	/* if !system_supports_address_auth() then skip enable */
+alternative_if_not ARM64_HAS_ADDRESS_AUTH
+	b	3f
+alternative_else_nop_endif
+
+	/* Install ptrauth key for secondary cpus */
+	adr_l	x2, secondary_data
+	ldr	x3, [x2, #CPU_BOOT_TASK]	// get secondary_data.task
+	cbz	x3, 2f				// check for slow booting cpus
+	ldp	x3, x4, [x2, #CPU_BOOT_PTRAUTH_KEY]
+	msr_s	SYS_APIAKEYLO_EL1, x3
+	msr_s	SYS_APIAKEYHI_EL1, x4
+
+2:	/* Enable ptrauth instructions */
+	ldr	x2, =SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | \
+		     SCTLR_ELx_ENDA | SCTLR_ELx_ENDB
+	orr	x0, x0, x2
+3:
+#endif
 	ret					// return to head.S
 SYM_FUNC_END(__cpu_setup)
@@ -378,3 +378,39 @@ void lkdtm_DOUBLE_FAULT(void)
 	pr_err("XFAIL: this test is ia32-only\n");
 #endif
 }

+#ifdef CONFIG_ARM64_PTR_AUTH
+static noinline void change_pac_parameters(void)
+{
+	/* Reset the keys of the current task */
+	ptrauth_thread_init_kernel(current);
+	ptrauth_thread_switch_kernel(current);
+}
+
+#define CORRUPT_PAC_ITERATE	10
+noinline void lkdtm_CORRUPT_PAC(void)
+{
+	int i;
+
+	if (!system_supports_address_auth()) {
+		pr_err("FAIL: arm64 pointer authentication feature not present\n");
+		return;
+	}
+
+	pr_info("Change the PAC parameters to force function return failure\n");
+	/*
+	 * The PAC is a hash value computed from the input keys, the return
+	 * address and the stack pointer. As the PAC has only a few bits,
+	 * collisions are possible, so iterate a few times to reduce their
+	 * probability.
+	 */
+	for (i = 0; i < CORRUPT_PAC_ITERATE; i++)
+		change_pac_parameters();
+
+	pr_err("FAIL: %s test failed. Kernel may be unstable from here\n", __func__);
+}
+#else /* !CONFIG_ARM64_PTR_AUTH */
+noinline void lkdtm_CORRUPT_PAC(void)
+{
+	pr_err("FAIL: arm64 pointer authentication config disabled\n");
+}
+#endif
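The mechanism behind this test, modelled in plain C (a toy hash stands in for the real QARMA-based PAC; the key values, widths and addresses below are made up): a pointer signed under one key fails authentication under another, which is exactly what rotating the task's kernel key mid-call provokes.

#include <stdint.h>
#include <stdio.h>

/* toy PAC: the real one depends on key, pointer and a modifier (here SP) */
static uint64_t toy_pac(uint64_t ptr, uint64_t sp, uint64_t key)
{
	return ((ptr ^ sp ^ key) * 0x9e3779b97f4a7c15ULL) >> 48;	/* 16 bits */
}

static uint64_t sign_lr(uint64_t lr, uint64_t sp, uint64_t key)
{
	return lr | (toy_pac(lr, sp, key) << 48);	/* PAC into [63:48] */
}

static int authenticate(uint64_t signed_lr, uint64_t sp, uint64_t key)
{
	uint64_t lr = signed_lr & ~(0xffffULL << 48);

	return (signed_lr >> 48) == toy_pac(lr, sp, key);
}

int main(void)
{
	uint64_t lr = 0x12345678, sp = 0xffff0000, k1 = 111, k2 = 222;
	uint64_t s = sign_lr(lr, sp, k1);

	/* same key: passes; key changed mid-function: fails almost surely,
	 * hence the CORRUPT_PAC_ITERATE loop to dodge rare collisions */
	printf("k1: %d, k2: %d\n", authenticate(s, sp, k1), authenticate(s, sp, k2));
	return 0;
}

On real hardware the failed authentication poisons the return address instead, so the subsequent ret faults; like the other lkdtm crash types, this one is normally triggered through the debugfs provoke-crash interface.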
@@ -116,6 +116,7 @@ static const struct crashtype crashtypes[] = {
 	CRASHTYPE(STACK_GUARD_PAGE_LEADING),
 	CRASHTYPE(STACK_GUARD_PAGE_TRAILING),
 	CRASHTYPE(UNSET_SMEP),
+	CRASHTYPE(CORRUPT_PAC),
 	CRASHTYPE(UNALIGNED_LOAD_STORE_WRITE),
 	CRASHTYPE(OVERWRITE_ALLOCATION),
 	CRASHTYPE(WRITE_AFTER_FREE),
......
@@ -31,6 +31,7 @@ void lkdtm_UNSET_SMEP(void);
 #ifdef CONFIG_X86_32
 void lkdtm_DOUBLE_FAULT(void);
 #endif
+void lkdtm_CORRUPT_PAC(void);

 /* lkdtm_heap.c */
 void __init lkdtm_heap_init(void);
......
@@ -6,7 +6,7 @@
 #include <linux/sched.h>
 #include <linux/random.h>

-#ifdef CONFIG_STACKPROTECTOR
+#if defined(CONFIG_STACKPROTECTOR) || defined(CONFIG_ARM64_PTR_AUTH)
 # include <asm/stackprotector.h>
 #else
 static inline void boot_init_stack_canary(void)
......
@@ -31,6 +31,12 @@ cc-option = $(success,$(CC) -Werror $(CLANG_FLAGS) $(1) -S -x c /dev/null -o /de
 # Return y if the linker supports <flag>, n otherwise
 ld-option = $(success,$(LD) -v $(1))

+# $(as-option,<flag>)
+# /dev/zero is used as the output instead of /dev/null because some assemblers
+# complain when the input and output are the same file. Both devices have the
+# same write behaviour, so they can be substituted freely.
+as-option = $(success, $(CC) $(CLANG_FLAGS) $(1) -c -x assembler /dev/null -o /dev/zero)
+
 # $(as-instr,<instr>)
 # Return y if the assembler supports <instr>, n otherwise
 as-instr = $(success,printf "%b\n" "$(1)" | $(CC) $(CLANG_FLAGS) -c -x assembler -o /dev/null -)
......