Commit cd22a8bf authored by Linus Torvalds

Merge tag 'arm64-spectre-bhb-for-v5.17-2' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 spectre fixes from James Morse:
 "ARM64 Spectre-BHB mitigations:

   - Make EL1 vectors per-cpu

   - Add mitigation sequences to the EL1 and EL2 vectors on vulnerable
     CPUs

   - Implement ARCH_WORKAROUND_3 for KVM guests

   - Report Vulnerable when unprivileged eBPF is enabled"
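
For readers who want to check the resulting state on a running system, the strings below are the ones produced by the updated cpu_show_spectre_v2() in this series; a minimal userspace sketch (not part of the series) that simply prints the sysfs entry:

  #include <stdio.h>

  /* Print the Spectre-v2 line, which now also reflects Spectre-BHB,
   * e.g. "Mitigation: CSV2, BHB" or "Vulnerable: Unprivileged eBPF enabled". */
  int main(void)
  {
      char line[128];
      FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spectre_v2", "r");

      if (!f) {
          perror("spectre_v2");
          return 1;
      }
      if (fgets(line, sizeof(line), f))
          fputs(line, stdout);
      fclose(f);
      return 0;
  }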

* tag 'arm64-spectre-bhb-for-v5.17-2' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: proton-pack: Include unprivileged eBPF status in Spectre v2 mitigation reporting
  arm64: Use the clearbhb instruction in mitigations
  KVM: arm64: Allow SMCCC_ARCH_WORKAROUND_3 to be discovered and migrated
  arm64: Mitigate spectre style branch history side channels
  arm64: proton-pack: Report Spectre-BHB vulnerabilities as part of Spectre-v2
  arm64: Add percpu vectors for EL1
  arm64: entry: Add macro for reading symbol addresses from the trampoline
  arm64: entry: Add vectors that have the bhb mitigation sequences
  arm64: entry: Add non-kpti __bp_harden_el1_vectors for mitigations
  arm64: entry: Allow the trampoline text to occupy multiple pages
  arm64: entry: Make the kpti trampoline's kpti sequence optional
  arm64: entry: Move trampoline macros out of ifdef'd section
  arm64: entry: Don't assume tramp_vectors is the start of the vectors
  arm64: entry: Allow tramp_alias to access symbols after the 4K boundary
  arm64: entry: Move the trampoline data page before the text page
  arm64: entry: Free up another register on kpti's tramp_exit path
  arm64: entry: Make the trampoline cleanup optional
  KVM: arm64: Allow indirect vectors to be used without SPECTRE_V3A
  arm64: spectre: Rename spectre_v4_patch_fw_mitigation_conduit
  arm64: entry.S: Add ventry overflow sanity checks
parents fc55c23a 58c9a506
@@ -1383,6 +1383,15 @@ config UNMAP_KERNEL_AT_EL0
If unsure, say Y.
config MITIGATE_SPECTRE_BRANCH_HISTORY
bool "Mitigate Spectre style attacks against branch history" if EXPERT
default y
help
Speculation attacks against some high-performance processors can
make use of branch history to influence future speculation.
When taking an exception from user-space, a sequence of branches
or a firmware call overwrites the branch history.
config RODATA_FULL_DEFAULT_ENABLED
bool "Apply r/o permissions of VM areas also to their linear aliases"
default y
...
@@ -108,6 +108,13 @@
hint #20
.endm
/*
* Clear Branch History instruction
*/
.macro clearbhb
hint #22
.endm
/*
* Speculation barrier
*/
@@ -850,4 +857,50 @@ alternative_endif
#endif /* GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT */
.macro __mitigate_spectre_bhb_loop tmp
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
alternative_cb spectre_bhb_patch_loop_iter
mov \tmp, #32 // Patched to correct the immediate
alternative_cb_end
.Lspectre_bhb_loop\@:
b . + 4
subs \tmp, \tmp, #1
b.ne .Lspectre_bhb_loop\@
sb
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
.endm
.macro mitigate_spectre_bhb_loop tmp
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
alternative_cb spectre_bhb_patch_loop_mitigation_enable
b .L_spectre_bhb_loop_done\@ // Patched to NOP
alternative_cb_end
__mitigate_spectre_bhb_loop \tmp
.L_spectre_bhb_loop_done\@:
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
.endm
/* Save/restores x0-x3 to the stack */
.macro __mitigate_spectre_bhb_fw
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
stp x0, x1, [sp, #-16]!
stp x2, x3, [sp, #-16]!
mov w0, #ARM_SMCCC_ARCH_WORKAROUND_3
alternative_cb smccc_patch_fw_mitigation_conduit
nop // Patched to SMC/HVC #0
alternative_cb_end
ldp x2, x3, [sp], #16
ldp x0, x1, [sp], #16
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
.endm
.macro mitigate_spectre_bhb_clear_insn
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
alternative_cb spectre_bhb_patch_clearbhb
/* Patched to NOP when not supported */
clearbhb
isb
alternative_cb_end
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
.endm
#endif /* __ASM_ASSEMBLER_H */
@@ -637,6 +637,35 @@ static inline bool cpu_supports_mixed_endian_el0(void)
return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1));
}
static inline bool supports_csv2p3(int scope)
{
u64 pfr0;
u8 csv2_val;
if (scope == SCOPE_LOCAL_CPU)
pfr0 = read_sysreg_s(SYS_ID_AA64PFR0_EL1);
else
pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
csv2_val = cpuid_feature_extract_unsigned_field(pfr0,
ID_AA64PFR0_CSV2_SHIFT);
return csv2_val == 3;
}
static inline bool supports_clearbhb(int scope)
{
u64 isar2;
if (scope == SCOPE_LOCAL_CPU)
isar2 = read_sysreg_s(SYS_ID_AA64ISAR2_EL1);
else
isar2 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR2_EL1);
return cpuid_feature_extract_unsigned_field(isar2,
ID_AA64ISAR2_CLEARBHB_SHIFT);
}
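
Both helpers above reduce to the standard ID-register pattern: pick either the local or the sanitised copy of the register, then extract a 4-bit unsigned field. A standalone sketch of that extraction (a sketch of what cpuid_feature_extract_unsigned_field() boils down to; the CSV2 shift value is taken from the kernel's sysreg.h, the CLEARBHB shift is the one added later in this diff):

  #include <stdint.h>
  #include <stdio.h>

  #define ID_AA64PFR0_CSV2_SHIFT      56
  #define ID_AA64ISAR2_CLEARBHB_SHIFT 28

  /* Extract a 4-bit unsigned field from an ID register value. */
  static unsigned int extract_u4(uint64_t reg, unsigned int shift)
  {
      return (reg >> shift) & 0xf;
  }

  int main(void)
  {
      uint64_t pfr0  = (uint64_t)3 << ID_AA64PFR0_CSV2_SHIFT;   /* pretend CSV2 == 3 */
      uint64_t isar2 = (uint64_t)1 << ID_AA64ISAR2_CLEARBHB_SHIFT;

      printf("csv2=%u clearbhb=%u\n",
             extract_u4(pfr0, ID_AA64PFR0_CSV2_SHIFT),
             extract_u4(isar2, ID_AA64ISAR2_CLEARBHB_SHIFT));
      return 0;
  }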
const struct cpumask *system_32bit_el0_cpumask(void);
DECLARE_STATIC_KEY_FALSE(arm64_mismatched_32bit_el0);
...
@@ -73,10 +73,14 @@
#define ARM_CPU_PART_CORTEX_A76 0xD0B
#define ARM_CPU_PART_NEOVERSE_N1 0xD0C
#define ARM_CPU_PART_CORTEX_A77 0xD0D
#define ARM_CPU_PART_NEOVERSE_V1 0xD40
#define ARM_CPU_PART_CORTEX_A78 0xD41
#define ARM_CPU_PART_CORTEX_X1 0xD44
#define ARM_CPU_PART_CORTEX_A510 0xD46
#define ARM_CPU_PART_CORTEX_A710 0xD47
#define ARM_CPU_PART_CORTEX_X2 0xD48
#define ARM_CPU_PART_NEOVERSE_N2 0xD49
#define ARM_CPU_PART_CORTEX_A78C 0xD4B
#define APM_CPU_PART_POTENZA 0x000
@@ -117,10 +121,14 @@
#define MIDR_CORTEX_A76 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76)
#define MIDR_NEOVERSE_N1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N1)
#define MIDR_CORTEX_A77 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A77)
#define MIDR_NEOVERSE_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V1)
#define MIDR_CORTEX_A78 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78)
#define MIDR_CORTEX_X1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1)
#define MIDR_CORTEX_A510 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A510)
#define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710)
#define MIDR_CORTEX_X2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X2)
#define MIDR_NEOVERSE_N2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2)
#define MIDR_CORTEX_A78C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78C)
#define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
#define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
...
@@ -62,9 +62,11 @@ enum fixed_addresses {
#endif /* CONFIG_ACPI_APEI_GHES */
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
FIX_ENTRY_TRAMP_TEXT3,
FIX_ENTRY_TRAMP_TEXT2,
FIX_ENTRY_TRAMP_TEXT1,
FIX_ENTRY_TRAMP_DATA,
-FIX_ENTRY_TRAMP_TEXT,
-#define TRAMP_VALIAS (__fix_to_virt(FIX_ENTRY_TRAMP_TEXT))
#define TRAMP_VALIAS (__fix_to_virt(FIX_ENTRY_TRAMP_TEXT1))
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
__end_of_permanent_fixed_addresses,
...
@@ -65,6 +65,7 @@ enum aarch64_insn_hint_cr_op {
AARCH64_INSN_HINT_PSB = 0x11 << 5,
AARCH64_INSN_HINT_TSB = 0x12 << 5,
AARCH64_INSN_HINT_CSDB = 0x14 << 5,
AARCH64_INSN_HINT_CLEARBHB = 0x16 << 5,
AARCH64_INSN_HINT_BTI = 0x20 << 5,
AARCH64_INSN_HINT_BTIC = 0x22 << 5,
...
@@ -714,6 +714,11 @@ static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
ctxt_sys_reg(cpu_ctxt, MPIDR_EL1) = read_cpuid_mpidr();
}
static inline bool kvm_system_needs_idmapped_vectors(void)
{
return cpus_have_const_cap(ARM64_SPECTRE_V3A);
}
void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu);
static inline void kvm_arch_hardware_unsetup(void) {}
...
@@ -23,4 +23,9 @@ extern char __mmuoff_data_start[], __mmuoff_data_end[];
extern char __entry_tramp_text_start[], __entry_tramp_text_end[];
extern char __relocate_new_kernel_start[], __relocate_new_kernel_end[];
static inline size_t entry_tramp_text_size(void)
{
return __entry_tramp_text_end - __entry_tramp_text_start;
}
#endif /* __ASM_SECTIONS_H */
@@ -93,5 +93,9 @@ void spectre_v4_enable_task_mitigation(struct task_struct *tsk);
enum mitigation_state arm64_get_meltdown_state(void);
enum mitigation_state arm64_get_spectre_bhb_state(void);
bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, int scope);
u8 spectre_bhb_loop_affected(int scope);
void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
#endif /* __ASSEMBLY__ */
#endif /* __ASM_SPECTRE_H */
@@ -773,6 +773,7 @@
#define ID_AA64ISAR1_GPI_IMP_DEF 0x1
/* id_aa64isar2 */
#define ID_AA64ISAR2_CLEARBHB_SHIFT 28
#define ID_AA64ISAR2_RPRES_SHIFT 4
#define ID_AA64ISAR2_WFXT_SHIFT 0
@@ -904,6 +905,7 @@
#endif
/* id_aa64mmfr1 */
#define ID_AA64MMFR1_ECBHB_SHIFT 60
#define ID_AA64MMFR1_AFP_SHIFT 44
#define ID_AA64MMFR1_ETS_SHIFT 36
#define ID_AA64MMFR1_TWED_SHIFT 32
...
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2022 ARM Ltd.
*/
#ifndef __ASM_VECTORS_H
#define __ASM_VECTORS_H
#include <linux/bug.h>
#include <linux/percpu.h>
#include <asm/fixmap.h>
extern char vectors[];
extern char tramp_vectors[];
extern char __bp_harden_el1_vectors[];
/*
* Note: the order of this enum corresponds to two arrays in entry.S:
* tramp_vecs and __bp_harden_el1_vectors. By default the canonical
* 'full fat' vectors are used directly.
*/
enum arm64_bp_harden_el1_vectors {
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
/*
* Perform the BHB loop mitigation, before branching to the canonical
* vectors.
*/
EL1_VECTOR_BHB_LOOP,
/*
* Make the SMC call for firmware mitigation, before branching to the
* canonical vectors.
*/
EL1_VECTOR_BHB_FW,
/*
* Use the ClearBHB instruction, before branching to the canonical
* vectors.
*/
EL1_VECTOR_BHB_CLEAR_INSN,
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
/*
* Remap the kernel before branching to the canonical vectors.
*/
EL1_VECTOR_KPTI,
};
#ifndef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
#define EL1_VECTOR_BHB_LOOP -1
#define EL1_VECTOR_BHB_FW -1
#define EL1_VECTOR_BHB_CLEAR_INSN -1
#endif /* !CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
/* The vectors to use on return from EL0. e.g. to remap the kernel */
DECLARE_PER_CPU_READ_MOSTLY(const char *, this_cpu_vector);
#ifndef CONFIG_UNMAP_KERNEL_AT_EL0
#define TRAMP_VALIAS 0
#endif
static inline const char *
arm64_get_bp_hardening_vector(enum arm64_bp_harden_el1_vectors slot)
{
if (arm64_kernel_unmapped_at_el0())
return (char *)TRAMP_VALIAS + SZ_2K * slot;
WARN_ON_ONCE(slot == EL1_VECTOR_KPTI);
return __bp_harden_el1_vectors + SZ_2K * slot;
}
#endif /* __ASM_VECTORS_H */
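
Each enum value above selects a 2KB vector table, so the helper is plain base-plus-offset arithmetic: TRAMP_VALIAS (or __bp_harden_el1_vectors) plus SZ_2K times the slot. A small sketch of the same arithmetic with a made-up base address, assuming the BHB option is enabled so all four slots exist:

  #include <stdint.h>
  #include <stdio.h>

  #define SZ_2K 0x800

  /* Mirrors the enum order above when CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY=y. */
  enum bp_harden_el1_vectors {
      EL1_VECTOR_BHB_LOOP,
      EL1_VECTOR_BHB_FW,
      EL1_VECTOR_BHB_CLEAR_INSN,
      EL1_VECTOR_KPTI,
  };

  int main(void)
  {
      /* Hypothetical base standing in for TRAMP_VALIAS or __bp_harden_el1_vectors. */
      uint64_t base = 0xfffffffffe000000ULL;

      for (int slot = EL1_VECTOR_BHB_LOOP; slot <= EL1_VECTOR_KPTI; slot++)
          printf("slot %d -> %#llx\n", slot,
                 (unsigned long long)(base + (uint64_t)SZ_2K * slot));
      return 0;
  }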
@@ -281,6 +281,11 @@ struct kvm_arm_copy_mte_tags {
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED 3
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED (1U << 4)
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3 KVM_REG_ARM_FW_REG(3)
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_AVAIL 0
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_AVAIL 1
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_REQUIRED 2
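
A VMM that wants to preserve this state across migration reads and writes the new pseudo-register with the usual one-reg ioctls; a sketch assuming a <linux/kvm.h> new enough to carry the define above and an already-created vcpu_fd:

  #include <stdint.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  /* Read the WORKAROUND_3 level (0/1/2 as defined above) from a vCPU. */
  static int read_wa3_level(int vcpu_fd, uint64_t *level)
  {
      struct kvm_one_reg reg = {
          .id   = KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3,
          .addr = (uint64_t)(uintptr_t)level,
      };

      return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
  }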
/* SVE registers */
#define KVM_REG_ARM64_SVE (0x15 << KVM_REG_ARM_COPROC_SHIFT)
...
@@ -502,6 +502,13 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
.matches = has_spectre_v4,
.cpu_enable = spectre_v4_enable_mitigation,
},
{
.desc = "Spectre-BHB",
.capability = ARM64_SPECTRE_BHB,
.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
.matches = is_spectre_bhb_affected,
.cpu_enable = spectre_bhb_enable_mitigation,
},
#ifdef CONFIG_ARM64_ERRATUM_1418040
{
.desc = "ARM erratum 1418040",
...
@@ -73,6 +73,8 @@
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/kasan.h>
#include <linux/percpu.h>
#include <asm/cpu.h>
#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
@@ -85,6 +87,7 @@
#include <asm/smp.h>
#include <asm/sysreg.h>
#include <asm/traps.h>
#include <asm/vectors.h>
#include <asm/virt.h>
/* Kernel representation of AT_HWCAP and AT_HWCAP2 */
@@ -110,6 +113,8 @@ DECLARE_BITMAP(boot_capabilities, ARM64_NPATCHABLE);
bool arm64_use_ng_mappings = false;
EXPORT_SYMBOL(arm64_use_ng_mappings);
DEFINE_PER_CPU_READ_MOSTLY(const char *, this_cpu_vector) = vectors;
/*
* Permit PER_LINUX32 and execve() of 32-bit binaries even if not all CPUs
* support it?
@@ -226,6 +231,7 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
};
static const struct arm64_ftr_bits ftr_id_aa64isar2[] = {
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64ISAR2_CLEARBHB_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_RPRES_SHIFT, 4, 0),
ARM64_FTR_END,
};
@@ -1590,6 +1596,12 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
int cpu = smp_processor_id();
if (__this_cpu_read(this_cpu_vector) == vectors) {
const char *v = arm64_get_bp_hardening_vector(EL1_VECTOR_KPTI);
__this_cpu_write(this_cpu_vector, v);
}
/*
* We don't need to rewrite the page-tables if either we've done
* it already or we have KASLR enabled and therefore have not
...
@@ -37,18 +37,21 @@
.macro kernel_ventry, el:req, ht:req, regsize:req, label:req
.align 7
-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
.Lventry_start\@:
.if \el == 0
-alternative_if ARM64_UNMAP_KERNEL_AT_EL0
/*
* This must be the first instruction of the EL0 vector entries. It is
* skipped by the trampoline vectors, to trigger the cleanup.
*/
b .Lskip_tramp_vectors_cleanup\@
.if \regsize == 64
mrs x30, tpidrro_el0
msr tpidrro_el0, xzr
.else
mov x30, xzr
.endif
-alternative_else_nop_endif
.Lskip_tramp_vectors_cleanup\@:
.endif
-#endif
sub sp, sp, #PT_REGS_SIZE
#ifdef CONFIG_VMAP_STACK
@@ -95,11 +98,15 @@ alternative_else_nop_endif
mrs x0, tpidrro_el0
#endif
b el\el\ht\()_\regsize\()_\label
.org .Lventry_start\@ + 128 // Did we overflow the ventry slot?
.endm
-.macro tramp_alias, dst, sym
.macro tramp_alias, dst, sym, tmp
mov_q \dst, TRAMP_VALIAS
-add \dst, \dst, #(\sym - .entry.tramp.text)
adr_l \tmp, \sym
add \dst, \dst, \tmp
adr_l \tmp, .entry.tramp.text
sub \dst, \dst, \tmp
.endm
/*
@@ -116,7 +123,7 @@ alternative_cb_end
tbnz \tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@
mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2
mov w1, #\state
-alternative_cb spectre_v4_patch_fw_mitigation_conduit
alternative_cb smccc_patch_fw_mitigation_conduit
nop // Patched to SMC/HVC #0
alternative_cb_end
.L__asm_ssbd_skip\@:
@@ -413,21 +420,26 @@ alternative_else_nop_endif
ldp x24, x25, [sp, #16 * 12]
ldp x26, x27, [sp, #16 * 13]
ldp x28, x29, [sp, #16 * 14]
-ldr lr, [sp, #S_LR]
-add sp, sp, #PT_REGS_SIZE // restore sp
.if \el == 0
-alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
ldr lr, [sp, #S_LR]
add sp, sp, #PT_REGS_SIZE // restore sp
eret
alternative_else_nop_endif
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
bne 4f
-msr far_el1, x30
msr far_el1, x29
-tramp_alias x30, tramp_exit_native
tramp_alias x30, tramp_exit_native, x29
br x30
4:
-tramp_alias x30, tramp_exit_compat
tramp_alias x30, tramp_exit_compat, x29
br x30
#endif
.else
ldr lr, [sp, #S_LR]
add sp, sp, #PT_REGS_SIZE // restore sp
/* Ensure any device/NC reads complete */
alternative_insn nop, "dmb sy", ARM64_WORKAROUND_1508412
@@ -594,12 +606,6 @@ SYM_CODE_END(ret_to_user)
.popsection // .entry.text
-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
-/*
-* Exception vectors trampoline.
-*/
-.pushsection ".entry.tramp.text", "ax"
// Move from tramp_pg_dir to swapper_pg_dir
.macro tramp_map_kernel, tmp
mrs \tmp, ttbr1_el1
@@ -633,12 +639,47 @@ alternative_else_nop_endif
*/
.endm
-.macro tramp_ventry, regsize = 64
.macro tramp_data_page dst
adr_l \dst, .entry.tramp.text
sub \dst, \dst, PAGE_SIZE
.endm
.macro tramp_data_read_var dst, var
#ifdef CONFIG_RANDOMIZE_BASE
tramp_data_page \dst
add \dst, \dst, #:lo12:__entry_tramp_data_\var
ldr \dst, [\dst]
#else
ldr \dst, =\var
#endif
.endm
#define BHB_MITIGATION_NONE 0
#define BHB_MITIGATION_LOOP 1
#define BHB_MITIGATION_FW 2
#define BHB_MITIGATION_INSN 3
.macro tramp_ventry, vector_start, regsize, kpti, bhb
.align 7
1:
.if \regsize == 64
msr tpidrro_el0, x30 // Restored in kernel_ventry
.endif
.if \bhb == BHB_MITIGATION_LOOP
/*
* This sequence must appear before the first indirect branch. i.e. the
* ret out of tramp_ventry. It appears here because x30 is free.
*/
__mitigate_spectre_bhb_loop x30
.endif // \bhb == BHB_MITIGATION_LOOP
.if \bhb == BHB_MITIGATION_INSN
clearbhb
isb
.endif // \bhb == BHB_MITIGATION_INSN
.if \kpti == 1
/*
* Defend against branch aliasing attacks by pushing a dummy
* entry onto the return stack and using a RET instruction to
@@ -648,46 +689,75 @@ alternative_else_nop_endif
b .
2:
tramp_map_kernel x30
-#ifdef CONFIG_RANDOMIZE_BASE
-adr x30, tramp_vectors + PAGE_SIZE
alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
-ldr x30, [x30]
tramp_data_read_var x30, vectors
-#else
-ldr x30, =vectors
-#endif
alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM
-prfm plil1strm, [x30, #(1b - tramp_vectors)]
prfm plil1strm, [x30, #(1b - \vector_start)]
alternative_else_nop_endif
msr vbar_el1, x30
-add x30, x30, #(1b - tramp_vectors)
isb
.else
ldr x30, =vectors
.endif // \kpti == 1
.if \bhb == BHB_MITIGATION_FW
/*
* The firmware sequence must appear before the first indirect branch.
* i.e. the ret out of tramp_ventry. But it also needs the stack to be
* mapped to save/restore the registers the SMC clobbers.
*/
__mitigate_spectre_bhb_fw
.endif // \bhb == BHB_MITIGATION_FW
add x30, x30, #(1b - \vector_start + 4)
ret
.org 1b + 128 // Did we overflow the ventry slot?
.endm
.macro tramp_exit, regsize = 64
-adr x30, tramp_vectors
tramp_data_read_var x30, this_cpu_vector
get_this_cpu_offset x29
ldr x30, [x30, x29]
msr vbar_el1, x30
-tramp_unmap_kernel x30
ldr lr, [sp, #S_LR]
tramp_unmap_kernel x29
.if \regsize == 64
-mrs x30, far_el1
mrs x29, far_el1
.endif
add sp, sp, #PT_REGS_SIZE // restore sp
eret
sb
.endm
-.align 11
-SYM_CODE_START_NOALIGN(tramp_vectors)
.macro generate_tramp_vector, kpti, bhb
.Lvector_start\@:
.space 0x400
-tramp_ventry
-tramp_ventry
-tramp_ventry
-tramp_ventry
-tramp_ventry
.rept 4
tramp_ventry .Lvector_start\@, 64, \kpti, \bhb
.endr
.rept 4
tramp_ventry .Lvector_start\@, 32, \kpti, \bhb
.endr
.endm
-tramp_ventry 32
-tramp_ventry 32
-tramp_ventry 32
-tramp_ventry 32
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
/*
* Exception vectors trampoline.
* The order must match __bp_harden_el1_vectors and the
* arm64_bp_harden_el1_vectors enum.
*/
.pushsection ".entry.tramp.text", "ax"
.align 11
SYM_CODE_START_NOALIGN(tramp_vectors)
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_LOOP
generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_FW
generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_INSN
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_NONE
SYM_CODE_END(tramp_vectors)
SYM_CODE_START(tramp_exit_native)
@@ -704,12 +774,56 @@ SYM_CODE_END(tramp_exit_compat)
.pushsection ".rodata", "a"
.align PAGE_SHIFT
SYM_DATA_START(__entry_tramp_data_start)
__entry_tramp_data_vectors:
.quad vectors
#ifdef CONFIG_ARM_SDE_INTERFACE
__entry_tramp_data___sdei_asm_handler:
.quad __sdei_asm_handler
#endif /* CONFIG_ARM_SDE_INTERFACE */
__entry_tramp_data_this_cpu_vector:
.quad this_cpu_vector
SYM_DATA_END(__entry_tramp_data_start)
.popsection // .rodata
#endif /* CONFIG_RANDOMIZE_BASE */
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
/*
* Exception vectors for spectre mitigations on entry from EL1 when
* kpti is not in use.
*/
.macro generate_el1_vector, bhb
.Lvector_start\@:
kernel_ventry 1, t, 64, sync // Synchronous EL1t
kernel_ventry 1, t, 64, irq // IRQ EL1t
kernel_ventry 1, t, 64, fiq // FIQ EL1t
kernel_ventry 1, t, 64, error // Error EL1t
kernel_ventry 1, h, 64, sync // Synchronous EL1h
kernel_ventry 1, h, 64, irq // IRQ EL1h
kernel_ventry 1, h, 64, fiq // FIQ EL1h
kernel_ventry 1, h, 64, error // Error EL1h
.rept 4
tramp_ventry .Lvector_start\@, 64, 0, \bhb
.endr
.rept 4
tramp_ventry .Lvector_start\@, 32, 0, \bhb
.endr
.endm
/* The order must match tramp_vecs and the arm64_bp_harden_el1_vectors enum. */
.pushsection ".entry.text", "ax"
.align 11
SYM_CODE_START(__bp_harden_el1_vectors)
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
generate_el1_vector bhb=BHB_MITIGATION_LOOP
generate_el1_vector bhb=BHB_MITIGATION_FW
generate_el1_vector bhb=BHB_MITIGATION_INSN
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
SYM_CODE_END(__bp_harden_el1_vectors)
.popsection
/*
* Register switch for AArch64. The callee-saved registers need to be saved
* and restored. On entry:
@@ -835,14 +949,7 @@ SYM_CODE_START(__sdei_asm_entry_trampoline)
* Remember whether to unmap the kernel on exit.
*/
1: str x4, [x1, #(SDEI_EVENT_INTREGS + S_SDEI_TTBR1)]
tramp_data_read_var x4, __sdei_asm_handler
-#ifdef CONFIG_RANDOMIZE_BASE
-adr x4, tramp_vectors + PAGE_SIZE
-add x4, x4, #:lo12:__sdei_asm_trampoline_next_handler
-ldr x4, [x4]
-#else
-ldr x4, =__sdei_asm_handler
-#endif
br x4
SYM_CODE_END(__sdei_asm_entry_trampoline)
NOKPROBE(__sdei_asm_entry_trampoline)
@@ -865,13 +972,6 @@ SYM_CODE_END(__sdei_asm_exit_trampoline)
NOKPROBE(__sdei_asm_exit_trampoline)
.ltorg
.popsection // .entry.tramp.text
-#ifdef CONFIG_RANDOMIZE_BASE
-.pushsection ".rodata", "a"
-SYM_DATA_START(__sdei_asm_trampoline_next_handler)
-.quad __sdei_asm_handler
-SYM_DATA_END(__sdei_asm_trampoline_next_handler)
-.popsection // .rodata
-#endif /* CONFIG_RANDOMIZE_BASE */
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
/*
@@ -981,7 +1081,7 @@ alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
alternative_else_nop_endif
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
-tramp_alias dst=x5, sym=__sdei_asm_exit_trampoline
tramp_alias dst=x5, sym=__sdei_asm_exit_trampoline, tmp=x3
br x5
#endif
SYM_CODE_END(__sdei_asm_handler)
...
@@ -66,6 +66,10 @@ KVM_NVHE_ALIAS(kvm_patch_vector_branch);
KVM_NVHE_ALIAS(kvm_update_va_mask);
KVM_NVHE_ALIAS(kvm_get_kimage_voffset);
KVM_NVHE_ALIAS(kvm_compute_final_ctr_el0);
KVM_NVHE_ALIAS(spectre_bhb_patch_loop_iter);
KVM_NVHE_ALIAS(spectre_bhb_patch_loop_mitigation_enable);
KVM_NVHE_ALIAS(spectre_bhb_patch_wa3);
KVM_NVHE_ALIAS(spectre_bhb_patch_clearbhb);
/* Global kernel state accessed by nVHE hyp code. */
KVM_NVHE_ALIAS(kvm_vgic_global_state);
...
@@ -18,15 +18,18 @@
*/
#include <linux/arm-smccc.h>
#include <linux/bpf.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/sched/task_stack.h>
#include <asm/debug-monitors.h>
#include <asm/insn.h>
#include <asm/spectre.h>
#include <asm/traps.h>
#include <asm/vectors.h>
#include <asm/virt.h>
/*
@@ -96,14 +99,51 @@ static bool spectre_v2_mitigations_off(void)
return ret;
}
static const char *get_bhb_affected_string(enum mitigation_state bhb_state)
{
switch (bhb_state) {
case SPECTRE_UNAFFECTED:
return "";
default:
case SPECTRE_VULNERABLE:
return ", but not BHB";
case SPECTRE_MITIGATED:
return ", BHB";
}
}
static bool _unprivileged_ebpf_enabled(void)
{
#ifdef CONFIG_BPF_SYSCALL
return !sysctl_unprivileged_bpf_disabled;
#else
return false;
#endif
}
ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
char *buf)
{
enum mitigation_state bhb_state = arm64_get_spectre_bhb_state();
const char *bhb_str = get_bhb_affected_string(bhb_state);
const char *v2_str = "Branch predictor hardening";
switch (spectre_v2_state) {
case SPECTRE_UNAFFECTED:
-return sprintf(buf, "Not affected\n");
if (bhb_state == SPECTRE_UNAFFECTED)
return sprintf(buf, "Not affected\n");
/*
* Platforms affected by Spectre-BHB can't report
* "Not affected" for Spectre-v2.
*/
v2_str = "CSV2";
fallthrough;
case SPECTRE_MITIGATED:
-return sprintf(buf, "Mitigation: Branch predictor hardening\n");
if (bhb_state == SPECTRE_MITIGATED && _unprivileged_ebpf_enabled())
return sprintf(buf, "Vulnerable: Unprivileged eBPF enabled\n");
return sprintf(buf, "Mitigation: %s%s\n", v2_str, bhb_str);
case SPECTRE_VULNERABLE:
fallthrough;
default:
@@ -554,9 +594,9 @@ void __init spectre_v4_patch_fw_mitigation_enable(struct alt_instr *alt,
* Patch a NOP in the Spectre-v4 mitigation code with an SMC/HVC instruction
* to call into firmware to adjust the mitigation state.
*/
-void __init spectre_v4_patch_fw_mitigation_conduit(struct alt_instr *alt,
void __init smccc_patch_fw_mitigation_conduit(struct alt_instr *alt,
__le32 *origptr,
__le32 *updptr, int nr_inst)
{
u32 insn;
@@ -770,3 +810,344 @@ int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
return -ENODEV;
}
}
/*
* Spectre BHB.
*
* A CPU is either:
* - Mitigated by a branchy loop a CPU specific number of times, and listed
* in our "loop mitigated list".
* - Mitigated in software by the firmware Spectre v2 call.
* - Has the ClearBHB instruction to perform the mitigation.
* - Has the 'Exception Clears Branch History Buffer' (ECBHB) feature, so no
* software mitigation in the vectors is needed.
* - Has CSV2.3, so is unaffected.
*/
static enum mitigation_state spectre_bhb_state;
enum mitigation_state arm64_get_spectre_bhb_state(void)
{
return spectre_bhb_state;
}
enum bhb_mitigation_bits {
BHB_LOOP,
BHB_FW,
BHB_HW,
BHB_INSN,
};
static unsigned long system_bhb_mitigations;
/*
* This must be called with SCOPE_LOCAL_CPU for each type of CPU, before any
* SCOPE_SYSTEM call will give the right answer.
*/
u8 spectre_bhb_loop_affected(int scope)
{
u8 k = 0;
static u8 max_bhb_k;
if (scope == SCOPE_LOCAL_CPU) {
static const struct midr_range spectre_bhb_k32_list[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
{},
};
static const struct midr_range spectre_bhb_k24_list[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
{},
};
static const struct midr_range spectre_bhb_k8_list[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
{},
};
if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
k = 32;
else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
k = 24;
else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
k = 8;
max_bhb_k = max(max_bhb_k, k);
} else {
k = max_bhb_k;
}
return k;
}
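
The helper above is only meaningful if every CPU type has first been probed with SCOPE_LOCAL_CPU so that max_bhb_k is populated before a SCOPE_SYSTEM query, as the comment above it notes. A toy model of that contract (the scope constants and k values here are illustrative only, not the kernel's):

  #include <stdio.h>

  #define TOY_SCOPE_LOCAL_CPU 0
  #define TOY_SCOPE_SYSTEM    1

  static unsigned char max_bhb_k;

  static unsigned char toy_loop_affected(int scope, unsigned char this_cpu_k)
  {
      if (scope == TOY_SCOPE_LOCAL_CPU) {
          if (this_cpu_k > max_bhb_k)
              max_bhb_k = this_cpu_k;
          return this_cpu_k;
      }
      return max_bhb_k;   /* only valid once every CPU has been probed */
  }

  int main(void)
  {
      unsigned char k_per_cpu[] = { 8, 24, 32, 0 };   /* e.g. A57, A76, A78, unlisted */

      for (unsigned int i = 0; i < sizeof(k_per_cpu); i++)
          toy_loop_affected(TOY_SCOPE_LOCAL_CPU, k_per_cpu[i]);

      printf("system k = %u\n", toy_loop_affected(TOY_SCOPE_SYSTEM, 0));
      return 0;
  }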
static enum mitigation_state spectre_bhb_get_cpu_fw_mitigation_state(void)
{
int ret;
struct arm_smccc_res res;
arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
ARM_SMCCC_ARCH_WORKAROUND_3, &res);
ret = res.a0;
switch (ret) {
case SMCCC_RET_SUCCESS:
return SPECTRE_MITIGATED;
case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
return SPECTRE_UNAFFECTED;
default:
fallthrough;
case SMCCC_RET_NOT_SUPPORTED:
return SPECTRE_VULNERABLE;
}
}
static bool is_spectre_bhb_fw_affected(int scope)
{
static bool system_affected;
enum mitigation_state fw_state;
bool has_smccc = arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_NONE;
static const struct midr_range spectre_bhb_firmware_mitigated_list[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
{},
};
bool cpu_in_list = is_midr_in_range_list(read_cpuid_id(),
spectre_bhb_firmware_mitigated_list);
if (scope != SCOPE_LOCAL_CPU)
return system_affected;
fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
if (cpu_in_list || (has_smccc && fw_state == SPECTRE_MITIGATED)) {
system_affected = true;
return true;
}
return false;
}
static bool supports_ecbhb(int scope)
{
u64 mmfr1;
if (scope == SCOPE_LOCAL_CPU)
mmfr1 = read_sysreg_s(SYS_ID_AA64MMFR1_EL1);
else
mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
return cpuid_feature_extract_unsigned_field(mmfr1,
ID_AA64MMFR1_ECBHB_SHIFT);
}
bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
int scope)
{
WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
if (supports_csv2p3(scope))
return false;
if (supports_clearbhb(scope))
return true;
if (spectre_bhb_loop_affected(scope))
return true;
if (is_spectre_bhb_fw_affected(scope))
return true;
return false;
}
static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
{
const char *v = arm64_get_bp_hardening_vector(slot);
if (slot < 0)
return;
__this_cpu_write(this_cpu_vector, v);
/*
* When KPTI is in use, the vectors are switched when exiting to
* user-space.
*/
if (arm64_kernel_unmapped_at_el0())
return;
write_sysreg(v, vbar_el1);
isb();
}
void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
{
bp_hardening_cb_t cpu_cb;
enum mitigation_state fw_state, state = SPECTRE_VULNERABLE;
struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);
if (!is_spectre_bhb_affected(entry, SCOPE_LOCAL_CPU))
return;
if (arm64_get_spectre_v2_state() == SPECTRE_VULNERABLE) {
/* No point mitigating Spectre-BHB alone. */
} else if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY)) {
pr_info_once("spectre-bhb mitigation disabled by compile time option\n");
} else if (cpu_mitigations_off()) {
pr_info_once("spectre-bhb mitigation disabled by command line option\n");
} else if (supports_ecbhb(SCOPE_LOCAL_CPU)) {
state = SPECTRE_MITIGATED;
set_bit(BHB_HW, &system_bhb_mitigations);
} else if (supports_clearbhb(SCOPE_LOCAL_CPU)) {
/*
* Ensure KVM uses the indirect vector which will have ClearBHB
* added.
*/
if (!data->slot)
data->slot = HYP_VECTOR_INDIRECT;
this_cpu_set_vectors(EL1_VECTOR_BHB_CLEAR_INSN);
state = SPECTRE_MITIGATED;
set_bit(BHB_INSN, &system_bhb_mitigations);
} else if (spectre_bhb_loop_affected(SCOPE_LOCAL_CPU)) {
/*
* Ensure KVM uses the indirect vector which will have the
* branchy-loop added. A57/A72-r0 will already have selected
* the spectre-indirect vector, which is sufficient for BHB
* too.
*/
if (!data->slot)
data->slot = HYP_VECTOR_INDIRECT;
this_cpu_set_vectors(EL1_VECTOR_BHB_LOOP);
state = SPECTRE_MITIGATED;
set_bit(BHB_LOOP, &system_bhb_mitigations);
} else if (is_spectre_bhb_fw_affected(SCOPE_LOCAL_CPU)) {
fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
if (fw_state == SPECTRE_MITIGATED) {
/*
* Ensure KVM uses one of the spectre bp_hardening
* vectors. The indirect vector doesn't include the EL3
* call, so needs upgrading to
* HYP_VECTOR_SPECTRE_INDIRECT.
*/
if (!data->slot || data->slot == HYP_VECTOR_INDIRECT)
data->slot += 1;
this_cpu_set_vectors(EL1_VECTOR_BHB_FW);
/*
* The WA3 call in the vectors supersedes the WA1 call
* made during context-switch. Uninstall any firmware
* bp_hardening callback.
*/
cpu_cb = spectre_v2_get_sw_mitigation_cb();
if (__this_cpu_read(bp_hardening_data.fn) != cpu_cb)
__this_cpu_write(bp_hardening_data.fn, NULL);
state = SPECTRE_MITIGATED;
set_bit(BHB_FW, &system_bhb_mitigations);
}
}
update_mitigation_state(&spectre_bhb_state, state);
}
/* Patched to NOP when enabled */
void noinstr spectre_bhb_patch_loop_mitigation_enable(struct alt_instr *alt,
__le32 *origptr,
__le32 *updptr, int nr_inst)
{
BUG_ON(nr_inst != 1);
if (test_bit(BHB_LOOP, &system_bhb_mitigations))
*updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
}
/* Patched to NOP when enabled */
void noinstr spectre_bhb_patch_fw_mitigation_enabled(struct alt_instr *alt,
__le32 *origptr,
__le32 *updptr, int nr_inst)
{
BUG_ON(nr_inst != 1);
if (test_bit(BHB_FW, &system_bhb_mitigations))
*updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
}
/* Patched to correct the immediate */
void noinstr spectre_bhb_patch_loop_iter(struct alt_instr *alt,
__le32 *origptr, __le32 *updptr, int nr_inst)
{
u8 rd;
u32 insn;
u16 loop_count = spectre_bhb_loop_affected(SCOPE_SYSTEM);
BUG_ON(nr_inst != 1); /* MOV -> MOV */
if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY))
return;
insn = le32_to_cpu(*origptr);
rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);
insn = aarch64_insn_gen_movewide(rd, loop_count, 0,
AARCH64_INSN_VARIANT_64BIT,
AARCH64_INSN_MOVEWIDE_ZERO);
*updptr++ = cpu_to_le32(insn);
}
/* Patched to mov WA3 when supported */
void noinstr spectre_bhb_patch_wa3(struct alt_instr *alt,
__le32 *origptr, __le32 *updptr, int nr_inst)
{
u8 rd;
u32 insn;
BUG_ON(nr_inst != 1); /* MOV -> MOV */
if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY) ||
!test_bit(BHB_FW, &system_bhb_mitigations))
return;
insn = le32_to_cpu(*origptr);
rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);
insn = aarch64_insn_gen_logical_immediate(AARCH64_INSN_LOGIC_ORR,
AARCH64_INSN_VARIANT_32BIT,
AARCH64_INSN_REG_ZR, rd,
ARM_SMCCC_ARCH_WORKAROUND_3);
if (WARN_ON_ONCE(insn == AARCH64_BREAK_FAULT))
return;
*updptr++ = cpu_to_le32(insn);
}
/* Patched to NOP when not supported */
void __init spectre_bhb_patch_clearbhb(struct alt_instr *alt,
__le32 *origptr, __le32 *updptr, int nr_inst)
{
BUG_ON(nr_inst != 2);
if (test_bit(BHB_INSN, &system_bhb_mitigations))
return;
*updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
*updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
}
#ifdef CONFIG_BPF_SYSCALL
#define EBPF_WARN "Unprivileged eBPF is enabled, data leaks possible via Spectre v2 BHB attacks!\n"
void unpriv_ebpf_notify(int new_state)
{
if (spectre_v2_state == SPECTRE_VULNERABLE ||
spectre_bhb_state != SPECTRE_MITIGATED)
return;
if (!new_state)
pr_err("WARNING: %s", EBPF_WARN);
}
#endif
@@ -341,7 +341,7 @@ ASSERT(__hibernate_exit_text_end - (__hibernate_exit_text_start & ~(SZ_4K - 1))
<= SZ_4K, "Hibernate exit text too big or misaligned")
#endif
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
-ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) == PAGE_SIZE,
ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) <= 3*PAGE_SIZE,
"Entry trampoline text too big")
#endif
#ifdef CONFIG_KVM
...
@@ -1491,10 +1491,7 @@ static int kvm_init_vector_slots(void)
base = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs));
kvm_init_vector_slot(base, HYP_VECTOR_SPECTRE_DIRECT);
-if (!cpus_have_const_cap(ARM64_SPECTRE_V3A))
-return 0;
-if (!has_vhe()) {
if (kvm_system_needs_idmapped_vectors() && !has_vhe()) {
err = create_hyp_exec_mappings(__pa_symbol(__bp_harden_hyp_vecs),
__BP_HARDEN_HYP_VECS_SZ, &base);
if (err)
...
@@ -62,6 +62,10 @@ el1_sync: // Guest trapped into EL2
/* ARM_SMCCC_ARCH_WORKAROUND_2 handling */
eor w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \
ARM_SMCCC_ARCH_WORKAROUND_2)
cbz w1, wa_epilogue
eor w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_2 ^ \
ARM_SMCCC_ARCH_WORKAROUND_3)
cbnz w1, el1_trap
wa_epilogue:
@@ -192,7 +196,10 @@ SYM_CODE_END(__kvm_hyp_vector)
sub sp, sp, #(8 * 4)
stp x2, x3, [sp, #(8 * 0)]
stp x0, x1, [sp, #(8 * 2)]
alternative_cb spectre_bhb_patch_wa3
/* Patched to mov WA3 when supported */
mov w0, #ARM_SMCCC_ARCH_WORKAROUND_1
alternative_cb_end
smc #0
ldp x2, x3, [sp, #(8 * 0)]
add sp, sp, #(8 * 2)
@@ -205,6 +212,8 @@ SYM_CODE_END(__kvm_hyp_vector)
spectrev2_smccc_wa1_smc
.else
stp x0, x1, [sp, #-16]!
mitigate_spectre_bhb_loop x0
mitigate_spectre_bhb_clear_insn
.endif
.if \indirect != 0
alternative_cb kvm_patch_vector_branch
...
@@ -148,8 +148,10 @@ int hyp_map_vectors(void)
phys_addr_t phys;
void *bp_base;
-if (!cpus_have_const_cap(ARM64_SPECTRE_V3A))
if (!kvm_system_needs_idmapped_vectors()) {
__hyp_bp_vect_base = __bp_harden_hyp_vecs;
return 0;
}
phys = __hyp_pa(__bp_harden_hyp_vecs);
bp_base = (void *)__pkvm_create_private_mapping(phys,
...
@@ -10,6 +10,7 @@
#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <linux/percpu.h>
#include <uapi/linux/psci.h>
#include <kvm/arm_psci.h>
@@ -24,6 +25,8 @@
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
#include <asm/vectors.h>
/* VHE specific context */
DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data);
@@ -67,7 +70,7 @@ NOKPROBE_SYMBOL(__activate_traps);
static void __deactivate_traps(struct kvm_vcpu *vcpu)
{
-extern char vectors[]; /* kernel exception vectors */
const char *host_vectors = vectors;
___deactivate_traps(vcpu);
@@ -81,7 +84,10 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);
-write_sysreg(vectors, vbar_el1);
if (!arm64_kernel_unmapped_at_el0())
host_vectors = __this_cpu_read(this_cpu_vector);
write_sysreg(host_vectors, vbar_el1);
}
NOKPROBE_SYMBOL(__deactivate_traps);
...
@@ -107,6 +107,18 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
break;
}
break;
case ARM_SMCCC_ARCH_WORKAROUND_3:
switch (arm64_get_spectre_bhb_state()) {
case SPECTRE_VULNERABLE:
break;
case SPECTRE_MITIGATED:
val[0] = SMCCC_RET_SUCCESS;
break;
case SPECTRE_UNAFFECTED:
val[0] = SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED;
break;
}
break;
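
This is the host side of the handshake; from inside a guest the same workaround is discovered with the standard SMCCC feature query, mirroring spectre_bhb_get_cpu_fw_mitigation_state() earlier in this series. A kernel-context sketch of the guest-side probe (not code from this series):

  #include <linux/arm-smccc.h>
  #include <linux/types.h>

  /* True when the host answers SUCCESS (0) or UNAFFECTED (1). */
  static bool guest_has_arch_workaround_3(void)
  {
      struct arm_smccc_res res;

      arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                           ARM_SMCCC_ARCH_WORKAROUND_3, &res);

      return (long)res.a0 >= 0;
  }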
case ARM_SMCCC_HV_PV_TIME_FEATURES:
val[0] = SMCCC_RET_SUCCESS;
break;
...
@@ -405,7 +405,7 @@ int kvm_psci_call(struct kvm_vcpu *vcpu)
int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu)
{
-return 3; /* PSCI version and two workaround registers */
return 4; /* PSCI version and three workaround registers */
}
int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
@@ -419,6 +419,9 @@ int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
if (put_user(KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2, uindices++))
return -EFAULT;
if (put_user(KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3, uindices++))
return -EFAULT;
return 0;
}
@@ -458,6 +461,17 @@ static int get_kernel_wa_level(u64 regid)
case SPECTRE_VULNERABLE:
return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
}
break;
case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3:
switch (arm64_get_spectre_bhb_state()) {
case SPECTRE_VULNERABLE:
return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_AVAIL;
case SPECTRE_MITIGATED:
return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_AVAIL;
case SPECTRE_UNAFFECTED:
return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_REQUIRED;
}
return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_AVAIL;
}
return -EINVAL;
@@ -474,6 +488,7 @@ int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
break;
case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3:
val = get_kernel_wa_level(reg->id) & KVM_REG_FEATURE_LEVEL_MASK;
break;
default:
@@ -519,6 +534,7 @@ int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
}
case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3:
if (val & ~KVM_REG_FEATURE_LEVEL_MASK)
return -EINVAL;
...
@@ -617,6 +617,8 @@ early_param("rodata", parse_rodata);
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
static int __init map_entry_trampoline(void)
{
int i;
pgprot_t prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start);
@@ -625,11 +627,15 @@ static int __init map_entry_trampoline(void)
/* Map only the text into the trampoline page table */
memset(tramp_pg_dir, 0, PGD_SIZE);
-__create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, PAGE_SIZE,
-prot, __pgd_pgtable_alloc, 0);
__create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS,
entry_tramp_text_size(), prot,
__pgd_pgtable_alloc, NO_BLOCK_MAPPINGS);
/* Map both the text and data into the kernel page table */
-__set_fixmap(FIX_ENTRY_TRAMP_TEXT, pa_start, prot);
for (i = 0; i < DIV_ROUND_UP(entry_tramp_text_size(), PAGE_SIZE); i++)
__set_fixmap(FIX_ENTRY_TRAMP_TEXT1 - i,
pa_start + i * PAGE_SIZE, prot);
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
extern char __entry_tramp_data_start[];
...
@@ -44,6 +44,7 @@ MTE_ASYMM
SPECTRE_V2
SPECTRE_V3A
SPECTRE_V4
SPECTRE_BHB
SSBS
SVE
UNMAP_KERNEL_AT_EL0
...
@@ -92,6 +92,11 @@
ARM_SMCCC_SMC_32, \
0, 0x7fff)
#define ARM_SMCCC_ARCH_WORKAROUND_3 \
ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
ARM_SMCCC_SMC_32, \
0, 0x3fff)
#define ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID \
ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
ARM_SMCCC_SMC_32, \
...