Commit b1a33cfd authored by Marc Zyngier, committed by Greg Kroah-Hartman

arm64: Advertise mitigation of Spectre-v2, or lack thereof

[ Upstream commit 73f38166 ]

We currently have a list of CPUs affected by Spectre-v2, for which
we check that the firmware implements ARCH_WORKAROUND_1. It turns
out that not all firmware implementations provide the required
mitigation, and that we fail to let the user know about it.

Instead, let's slightly revamp our checks: rely on a whitelist of
cores that are known to be non-vulnerable, and report the status of
the mitigation in the kernel log.

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Jeremy Linton <jeremy.linton@arm.com>
Reviewed-by: Andre Przywara <andre.przywara@arm.com>
Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Tested-by: Stefan Wahren <stefan.wahren@i2se.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 59a6dc26
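
In outline, the revamped per-CPU check in this patch tries three things in
order: the CSV2 field of ID_AA64PFR0_EL1, the new whitelist of known-safe
cores, and finally firmware detection of ARCH_WORKAROUND_1. The sketch below
condenses that logic using the helpers visible in the diff; the wrapper
function name is illustrative only and is not a symbol added by the patch.

        /*
         * Condensed sketch of check_branch_predictor() as added in the
         * diff below. The name here is hypothetical; the real function
         * is wired up as the .matches hook of the errata entry.
         */
        static bool spectre_v2_mitigation_needed(void)
        {
                int need_wa;

                /* 1. CSV2 set in ID_AA64PFR0_EL1: this CPU is not vulnerable */
                if (cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64PFR0_EL1),
                                                         ID_AA64PFR0_CSV2_SHIFT))
                        return false;

                /* 2. Core is on the known-safe list (Cortex-A35/A53/A55) */
                if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
                        return false;

                /*
                 * 3. Fall back to firmware detection: -1 means the firmware
                 * lacks ARCH_WORKAROUND_1 (warn the user), 0 means no
                 * workaround is required, 1 means one was installed.
                 */
                need_wa = detect_harden_bp_fw();
                if (need_wa < 0)
                        pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n");

                return need_wa > 0;
        }

The full version in the diff additionally honours the "nospectre_v2" command
line option, which forces the mitigation off with a one-time pr_info_once()
notice, and WARNs if invoked with the wrong scope or with preemption enabled.
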
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -109,9 +109,9 @@ static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
 	__flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
 }
 
-static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
-				      const char *hyp_vecs_start,
-				      const char *hyp_vecs_end)
+static void install_bp_hardening_cb(bp_hardening_cb_t fn,
+				    const char *hyp_vecs_start,
+				    const char *hyp_vecs_end)
 {
 	static DEFINE_SPINLOCK(bp_lock);
 	int cpu, slot = -1;
@@ -138,7 +138,7 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
 #define __smccc_workaround_1_smc_start		NULL
 #define __smccc_workaround_1_smc_end		NULL
 
-static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
+static void install_bp_hardening_cb(bp_hardening_cb_t fn,
 				      const char *hyp_vecs_start,
 				      const char *hyp_vecs_end)
 {
@@ -146,23 +146,6 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
 }
 #endif	/* CONFIG_KVM_INDIRECT_VECTORS */
 
-static void install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry,
-				    bp_hardening_cb_t fn,
-				    const char *hyp_vecs_start,
-				    const char *hyp_vecs_end)
-{
-	u64 pfr0;
-
-	if (!entry->matches(entry, SCOPE_LOCAL_CPU))
-		return;
-
-	pfr0 = read_cpuid(ID_AA64PFR0_EL1);
-	if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
-		return;
-
-	__install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
-}
-
 #include <uapi/linux/psci.h>
 #include <linux/arm-smccc.h>
 #include <linux/psci.h>
@@ -197,31 +180,27 @@ static int __init parse_nospectre_v2(char *str)
 }
 early_param("nospectre_v2", parse_nospectre_v2);
 
-static void
-enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
+/*
+ * -1: No workaround
+ *  0: No workaround required
+ *  1: Workaround installed
+ */
+static int detect_harden_bp_fw(void)
 {
 	bp_hardening_cb_t cb;
 	void *smccc_start, *smccc_end;
 	struct arm_smccc_res res;
 	u32 midr = read_cpuid_id();
 
-	if (!entry->matches(entry, SCOPE_LOCAL_CPU))
-		return;
-
-	if (__nospectre_v2) {
-		pr_info_once("spectrev2 mitigation disabled by command line option\n");
-		return;
-	}
-
 	if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
-		return;
+		return -1;
 
 	switch (psci_ops.conduit) {
 	case PSCI_CONDUIT_HVC:
 		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
 				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
 		if ((int)res.a0 < 0)
-			return;
+			return -1;
 		cb = call_hvc_arch_workaround_1;
 		/* This is a guest, no need to patch KVM vectors */
 		smccc_start = NULL;
@@ -232,23 +211,23 @@ enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
 		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
 				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
 		if ((int)res.a0 < 0)
-			return;
+			return -1;
 		cb = call_smc_arch_workaround_1;
 		smccc_start = __smccc_workaround_1_smc_start;
 		smccc_end = __smccc_workaround_1_smc_end;
 		break;
 
 	default:
-		return;
+		return -1;
 	}
 
 	if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
 	    ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
 		cb = qcom_link_stack_sanitization;
 
-	install_bp_hardening_cb(entry, cb, smccc_start, smccc_end);
+	install_bp_hardening_cb(cb, smccc_start, smccc_end);
 
-	return;
+	return 1;
 }
 #endif	/* CONFIG_HARDEN_BRANCH_PREDICTOR */
 
@@ -535,24 +514,48 @@ multi_entry_cap_cpu_enable(const struct arm64_cpu_capabilities *entry)
 }
 
 #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
 /*
- * List of CPUs where we need to issue a psci call to
- * harden the branch predictor.
+ * List of CPUs that do not need any Spectre-v2 mitigation at all.
  */
-static const struct midr_range arm64_bp_harden_smccc_cpus[] = {
-	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
-	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
-	MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
-	MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
-	MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
-	MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
-	MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
-	MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
-	MIDR_ALL_VERSIONS(MIDR_NVIDIA_DENVER),
-	{},
+static const struct midr_range spectre_v2_safe_list[] = {
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
+	{ /* sentinel */ }
 };
 
+static bool __maybe_unused
+check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
+{
+	int need_wa;
+
+	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+
+	/* If the CPU has CSV2 set, we're safe */
+	if (cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64PFR0_EL1),
+						 ID_AA64PFR0_CSV2_SHIFT))
+		return false;
+
+	/* Alternatively, we have a list of unaffected CPUs */
+	if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
+		return false;
+
+	/* Fallback to firmware detection */
+	need_wa = detect_harden_bp_fw();
+	if (!need_wa)
+		return false;
+
+	/* forced off */
+	if (__nospectre_v2) {
+		pr_info_once("spectrev2 mitigation disabled by command line option\n");
+		return false;
+	}
+
+	if (need_wa < 0)
+		pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n");
+
+	return (need_wa > 0);
+}
 #endif
 
 #ifdef CONFIG_HARDEN_EL2_VECTORS
@@ -715,8 +718,8 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
 	{
 		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
-		.cpu_enable = enable_smccc_arch_workaround_1,
-		ERRATA_MIDR_RANGE_LIST(arm64_bp_harden_smccc_cpus),
+		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+		.matches = check_branch_predictor,
 	},
 #endif
 #ifdef CONFIG_HARDEN_EL2_VECTORS