Commit fe64d7d6 authored by Suzuki K Poulose, committed by Greg Kroah-Hartman

arm64: Add hypervisor safe helper for checking constant capabilities

commit a4023f68 upstream.

The hypervisor may not have full access to the kernel data structures
and hence cannot safely use cpus_have_cap() helper for checking the
system capability. Add a safe helper for hypervisors to check a constant
system capability, which *doesn't* fall back to checking the bitmap
maintained by the kernel. With this, make the cpus_have_cap() only
check the bitmask and force constant cap checks to use the new API
for quicker checks.

Cc: Robert Ritcher <rritcher@cavium.com>
Cc: Tirumalesh Chalamarla <tchalamarla@cavium.com>
Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Reviewed-by: Will Deacon <will.deacon@arm.com>
Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
[4.9: restore cpus_have_const_cap() to previously-backported code]
Signed-off-by: Mark Rutland <mark.rutland@arm.com> [v4.9 backport]
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent e1928457
...@@ -9,8 +9,6 @@ ...@@ -9,8 +9,6 @@
#ifndef __ASM_CPUFEATURE_H #ifndef __ASM_CPUFEATURE_H
#define __ASM_CPUFEATURE_H #define __ASM_CPUFEATURE_H
#include <linux/jump_label.h>
#include <asm/cpucaps.h> #include <asm/cpucaps.h>
#include <asm/hwcap.h> #include <asm/hwcap.h>
#include <asm/sysreg.h> #include <asm/sysreg.h>
...@@ -27,6 +25,8 @@ ...@@ -27,6 +25,8 @@
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
#include <linux/bug.h>
#include <linux/jump_label.h>
#include <linux/kernel.h> #include <linux/kernel.h>
/* CPU feature register tracking */ /* CPU feature register tracking */
...@@ -104,14 +104,19 @@ static inline bool cpu_have_feature(unsigned int num) ...@@ -104,14 +104,19 @@ static inline bool cpu_have_feature(unsigned int num)
return elf_hwcap & (1UL << num); return elf_hwcap & (1UL << num);
} }
/*
 * System capability check for constant caps.
 *
 * Unlike cpus_have_cap(), this consults only the static-key array
 * (cpu_hwcap_keys) and never the cpu_hwcaps bitmap, so it is safe for
 * hypervisor (EL2) code that may not have access to the kernel's data
 * structures (see commit message above).  Intended for capability
 * numbers that are compile-time constants, giving a patched-branch
 * check instead of a bitmap test.
 *
 * Takes unsigned int — matching cpus_have_cap() — so a negative value
 * cannot slip past the ARM64_NCAPS range check and index cpu_hwcap_keys
 * out of bounds.
 */
static inline bool cpus_have_const_cap(unsigned int num)
{
	/* Unknown/future caps are simply reported as not present. */
	if (num >= ARM64_NCAPS)
		return false;
	/* Static branch: runtime-patched to true when the cap is enabled. */
	return static_branch_unlikely(&cpu_hwcap_keys[num]);
}
static inline bool cpus_have_cap(unsigned int num) static inline bool cpus_have_cap(unsigned int num)
{ {
if (num >= ARM64_NCAPS) if (num >= ARM64_NCAPS)
return false; return false;
if (__builtin_constant_p(num)) return test_bit(num, cpu_hwcaps);
return static_branch_unlikely(&cpu_hwcap_keys[num]);
else
return test_bit(num, cpu_hwcaps);
} }
static inline void cpus_set_cap(unsigned int num) static inline void cpus_set_cap(unsigned int num)
...@@ -200,7 +205,7 @@ static inline bool cpu_supports_mixed_endian_el0(void) ...@@ -200,7 +205,7 @@ static inline bool cpu_supports_mixed_endian_el0(void)
static inline bool system_supports_32bit_el0(void) static inline bool system_supports_32bit_el0(void)
{ {
return cpus_have_cap(ARM64_HAS_32BIT_EL0); return cpus_have_const_cap(ARM64_HAS_32BIT_EL0);
} }
static inline bool system_supports_mixed_endian_el0(void) static inline bool system_supports_mixed_endian_el0(void)
......
...@@ -398,7 +398,7 @@ static inline void __cpu_init_stage2(void) ...@@ -398,7 +398,7 @@ static inline void __cpu_init_stage2(void)
static inline bool kvm_arm_harden_branch_predictor(void) static inline bool kvm_arm_harden_branch_predictor(void)
{ {
return cpus_have_cap(ARM64_HARDEN_BRANCH_PREDICTOR); return cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR);
} }
#endif /* __ARM64_KVM_HOST_H__ */ #endif /* __ARM64_KVM_HOST_H__ */
...@@ -341,7 +341,7 @@ static inline void *kvm_get_hyp_vector(void) ...@@ -341,7 +341,7 @@ static inline void *kvm_get_hyp_vector(void)
vect = __bp_harden_hyp_vecs_start + vect = __bp_harden_hyp_vecs_start +
data->hyp_vectors_slot * SZ_2K; data->hyp_vectors_slot * SZ_2K;
if (!cpus_have_cap(ARM64_HAS_VIRT_HOST_EXTN)) if (!cpus_have_const_cap(ARM64_HAS_VIRT_HOST_EXTN))
vect = lm_alias(vect); vect = lm_alias(vect);
} }
......
...@@ -37,7 +37,7 @@ typedef struct { ...@@ -37,7 +37,7 @@ typedef struct {
static inline bool arm64_kernel_unmapped_at_el0(void) static inline bool arm64_kernel_unmapped_at_el0(void)
{ {
return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0) && return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0) &&
cpus_have_cap(ARM64_UNMAP_KERNEL_AT_EL0); cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
} }
typedef void (*bp_hardening_cb_t)(void); typedef void (*bp_hardening_cb_t)(void);
......
...@@ -47,6 +47,7 @@ unsigned int compat_elf_hwcap2 __read_mostly; ...@@ -47,6 +47,7 @@ unsigned int compat_elf_hwcap2 __read_mostly;
#endif #endif
DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS); DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
EXPORT_SYMBOL(cpu_hwcaps);
DEFINE_STATIC_KEY_ARRAY_FALSE(cpu_hwcap_keys, ARM64_NCAPS); DEFINE_STATIC_KEY_ARRAY_FALSE(cpu_hwcap_keys, ARM64_NCAPS);
EXPORT_SYMBOL(cpu_hwcap_keys); EXPORT_SYMBOL(cpu_hwcap_keys);
...@@ -762,7 +763,7 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry, ...@@ -762,7 +763,7 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
* ThunderX leads to apparent I-cache corruption of kernel text, which * ThunderX leads to apparent I-cache corruption of kernel text, which
* ends as well as you might imagine. Don't even try. * ends as well as you might imagine. Don't even try.
*/ */
if (cpus_have_cap(ARM64_WORKAROUND_CAVIUM_27456)) { if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456)) {
str = "ARM64_WORKAROUND_CAVIUM_27456"; str = "ARM64_WORKAROUND_CAVIUM_27456";
__kpti_forced = -1; __kpti_forced = -1;
} }
...@@ -1203,5 +1204,5 @@ void __init setup_cpu_features(void) ...@@ -1203,5 +1204,5 @@ void __init setup_cpu_features(void)
static bool __maybe_unused static bool __maybe_unused
cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused) cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused)
{ {
return (cpus_have_cap(ARM64_HAS_PAN) && !cpus_have_cap(ARM64_HAS_UAO)); return (cpus_have_const_cap(ARM64_HAS_PAN) && !cpus_have_const_cap(ARM64_HAS_UAO));
} }
...@@ -291,7 +291,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start, ...@@ -291,7 +291,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
memset(childregs, 0, sizeof(struct pt_regs)); memset(childregs, 0, sizeof(struct pt_regs));
childregs->pstate = PSR_MODE_EL1h; childregs->pstate = PSR_MODE_EL1h;
if (IS_ENABLED(CONFIG_ARM64_UAO) && if (IS_ENABLED(CONFIG_ARM64_UAO) &&
cpus_have_cap(ARM64_HAS_UAO)) cpus_have_const_cap(ARM64_HAS_UAO))
childregs->pstate |= PSR_UAO_BIT; childregs->pstate |= PSR_UAO_BIT;
p->thread.cpu_context.x19 = stack_start; p->thread.cpu_context.x19 = stack_start;
p->thread.cpu_context.x20 = stk_sz; p->thread.cpu_context.x20 = stk_sz;
......
...@@ -120,11 +120,10 @@ static void gic_redist_wait_for_rwp(void) ...@@ -120,11 +120,10 @@ static void gic_redist_wait_for_rwp(void)
} }
#ifdef CONFIG_ARM64 #ifdef CONFIG_ARM64
static DEFINE_STATIC_KEY_FALSE(is_cavium_thunderx);
static u64 __maybe_unused gic_read_iar(void) static u64 __maybe_unused gic_read_iar(void)
{ {
if (static_branch_unlikely(&is_cavium_thunderx)) if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_23154))
return gic_read_iar_cavium_thunderx(); return gic_read_iar_cavium_thunderx();
else else
return gic_read_iar_common(); return gic_read_iar_common();
...@@ -908,14 +907,6 @@ static const struct irq_domain_ops partition_domain_ops = { ...@@ -908,14 +907,6 @@ static const struct irq_domain_ops partition_domain_ops = {
.select = gic_irq_domain_select, .select = gic_irq_domain_select,
}; };
static void gicv3_enable_quirks(void)
{
#ifdef CONFIG_ARM64
if (cpus_have_cap(ARM64_WORKAROUND_CAVIUM_23154))
static_branch_enable(&is_cavium_thunderx);
#endif
}
static int __init gic_init_bases(void __iomem *dist_base, static int __init gic_init_bases(void __iomem *dist_base,
struct redist_region *rdist_regs, struct redist_region *rdist_regs,
u32 nr_redist_regions, u32 nr_redist_regions,
...@@ -938,8 +929,6 @@ static int __init gic_init_bases(void __iomem *dist_base, ...@@ -938,8 +929,6 @@ static int __init gic_init_bases(void __iomem *dist_base,
gic_data.nr_redist_regions = nr_redist_regions; gic_data.nr_redist_regions = nr_redist_regions;
gic_data.redist_stride = redist_stride; gic_data.redist_stride = redist_stride;
gicv3_enable_quirks();
/* /*
* Find out how many interrupts are supported. * Find out how many interrupts are supported.
* The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI) * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment