Commit 1f5a062d authored by Will Deacon

Merge branch 'for-next/cpufeature' into for-next/core

* for-next/cpufeature:
  arm64/fpsimd: Only provide the length to cpufeature for xCR registers
  selftests/arm64: add HWCAP2_HBC test
  arm64: add HWCAP for FEAT_HBC (hinted conditional branches)
  arm64/cpufeature: Use ARM64_CPUID_FIELD() to match EVT
parents 6eaae198 01948b09
@@ -138,6 +138,7 @@
 #define KERNEL_HWCAP_SME_B16B16 __khwcap2_feature(SME_B16B16)
 #define KERNEL_HWCAP_SME_F16F16 __khwcap2_feature(SME_F16F16)
 #define KERNEL_HWCAP_MOPS __khwcap2_feature(MOPS)
+#define KERNEL_HWCAP_HBC __khwcap2_feature(HBC)
 
 /*
  * This yields a mask that user programs can use to figure out what
@@ -103,5 +103,6 @@
 #define HWCAP2_SME_B16B16 (1UL << 41)
 #define HWCAP2_SME_F16F16 (1UL << 42)
 #define HWCAP2_MOPS (1UL << 43)
+#define HWCAP2_HBC (1UL << 44)
 
 #endif /* _UAPI__ASM_HWCAP_H */
@@ -222,7 +222,7 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
 static const struct arm64_ftr_bits ftr_id_aa64isar2[] = {
        ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_CSSC_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_RPRFM_SHIFT, 4, 0),
-       ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64ISAR2_EL1_BC_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64ISAR2_EL1_BC_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_MOPS_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
                       FTR_STRICT, FTR_EXACT, ID_AA64ISAR2_EL1_APA3_SHIFT, 4, 0),
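The FTR_HIDDEN to FTR_VISIBLE flip is what lets userspace see the BC field at all: EL0 reads of the ID register trap to the kernel, which emulates them and exposes only FTR_VISIBLE fields. A hedged sketch of a direct probe; the S3_0_C0_C6_2 encoding for ID_AA64ISAR2_EL1 and the BC field position (bits [23:20]) are taken from the Arm ARM, not from this patch:

    int main(void)
    {
        unsigned long isar2;

        /* Trapped and emulated by the kernel; hidden fields read as zero.
         * S3_0_C0_C6_2 is the generic name for ID_AA64ISAR2_EL1, usable
         * with assemblers that predate the register. */
        asm volatile("mrs %0, S3_0_C0_C6_2" : "=r"(isar2));

        /* Assumed layout: BC is the 4-bit field at bits [23:20] */
        return (int)((isar2 >> 20) & 0xf);
    }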
@@ -2708,12 +2708,8 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                .desc = "Enhanced Virtualization Traps",
                .capability = ARM64_HAS_EVT,
                .type = ARM64_CPUCAP_SYSTEM_FEATURE,
-               .sys_reg = SYS_ID_AA64MMFR2_EL1,
-               .sign = FTR_UNSIGNED,
-               .field_pos = ID_AA64MMFR2_EL1_EVT_SHIFT,
-               .field_width = 4,
-               .min_field_value = ID_AA64MMFR2_EL1_EVT_IMP,
                .matches = has_cpuid_feature,
+               ARM64_CPUID_FIELDS(ID_AA64MMFR2_EL1, EVT, IMP)
        },
        {},
 };
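For context, ARM64_CPUID_FIELDS() condenses the five removed initializers into one line. An approximate expansion, based on the helper introduced earlier in this series (the exact macro body may differ):

    /* ARM64_CPUID_FIELDS(ID_AA64MMFR2_EL1, EVT, IMP) roughly expands to:
     *     .sys_reg = SYS_ID_AA64MMFR2_EL1,
     *     .field_pos = ID_AA64MMFR2_EL1_EVT_SHIFT,
     *     .field_width = ID_AA64MMFR2_EL1_EVT_WIDTH,
     *     .sign = FTR_UNSIGNED,
     *     .min_field_value = ID_AA64MMFR2_EL1_EVT_IMP,
     * i.e. the same settings the open-coded version supplied, with the
     * field width taken from the generated sysreg headers rather than a
     * bare 4.
     */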
@@ -2844,6 +2840,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
        HWCAP_CAP(ID_AA64ISAR2_EL1, RPRES, IMP, CAP_HWCAP, KERNEL_HWCAP_RPRES),
        HWCAP_CAP(ID_AA64ISAR2_EL1, WFxT, IMP, CAP_HWCAP, KERNEL_HWCAP_WFXT),
        HWCAP_CAP(ID_AA64ISAR2_EL1, MOPS, IMP, CAP_HWCAP, KERNEL_HWCAP_MOPS),
+       HWCAP_CAP(ID_AA64ISAR2_EL1, BC, IMP, CAP_HWCAP, KERNEL_HWCAP_HBC),
 #ifdef CONFIG_ARM64_SME
        HWCAP_CAP(ID_AA64PFR1_EL1, SME, IMP, CAP_HWCAP, KERNEL_HWCAP_SME),
        HWCAP_CAP(ID_AA64SMFR0_EL1, FA64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_FA64),
@@ -126,6 +126,7 @@ static const char *const hwcap_str[] = {
        [KERNEL_HWCAP_SME_B16B16] = "smeb16b16",
        [KERNEL_HWCAP_SME_F16F16] = "smef16f16",
        [KERNEL_HWCAP_MOPS] = "mops",
+       [KERNEL_HWCAP_HBC] = "hbc",
 };
 
 #ifdef CONFIG_COMPAT
@@ -1178,9 +1178,6 @@ void sve_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
  */
 u64 read_zcr_features(void)
 {
-       u64 zcr;
-       unsigned int vq_max;
-
        /*
         * Set the maximum possible VL, and write zeroes to all other
         * bits to see if they stick.
@@ -1188,12 +1185,8 @@ u64 read_zcr_features(void)
        sve_kernel_enable(NULL);
        write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL1);
 
-       zcr = read_sysreg_s(SYS_ZCR_EL1);
-       zcr &= ~(u64)ZCR_ELx_LEN_MASK; /* find sticky 1s outside LEN field */
-       vq_max = sve_vq_from_vl(sve_get_vl());
-       zcr |= vq_max - 1; /* set LEN field to maximum effective value */
-
-       return zcr;
+       /* Return LEN value that would be written to get the maximum VL */
+       return sve_vq_from_vl(sve_get_vl()) - 1;
 }
 
 void __init sve_setup(void)
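The helper now returns only the value that would be written to the LEN field for the maximum vector length, instead of a patched-up copy of ZCR_EL1. A worked example, assuming the usual arm64 definition sve_vq_from_vl(vl) = vl / 16 (one 128-bit quadword per 16 bytes of VL):

    /* Illustration only, not from this patch */
    unsigned int vl = 256;      /* probed maximum VL in bytes */
    unsigned int vq = vl / 16;  /* 16 quadwords               */
    u64 len = vq - 1;           /* LEN value returned: 15     */

read_smcr_features() below gets the same simplification for SME.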
@@ -1348,9 +1341,6 @@ void fa64_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
  */
 u64 read_smcr_features(void)
 {
-       u64 smcr;
-       unsigned int vq_max;
-
        sme_kernel_enable(NULL);
 
        /*
@@ -1359,12 +1349,8 @@ u64 read_smcr_features(void)
        write_sysreg_s(read_sysreg_s(SYS_SMCR_EL1) | SMCR_ELx_LEN_MASK,
                       SYS_SMCR_EL1);
 
-       smcr = read_sysreg_s(SYS_SMCR_EL1);
-       smcr &= ~(u64)SMCR_ELx_LEN_MASK; /* Only the LEN field */
-       vq_max = sve_vq_from_vl(sme_get_vl());
-       smcr |= vq_max - 1; /* set LEN field to maximum effective value */
-
-       return smcr;
+       /* Return LEN value that would be written to get the maximum VL */
+       return sve_vq_from_vl(sme_get_vl()) - 1;
 }
 
 void __init sme_setup(void)
@@ -208,6 +208,13 @@ static void svebf16_sigill(void)
        asm volatile(".inst 0x658aa000" : : : "z0");
 }
 
+static void hbc_sigill(void)
+{
+       /* BC.EQ +4 */
+       asm volatile("cmp xzr, xzr\n"
+                    ".inst 0x54000030" : : : "cc");
+}
+
 static const struct hwcap_data {
        const char *name;
        unsigned long at_hwcap;
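The raw .inst in hbc_sigill() is used because BC.cond needs a newer assembler than the selftests can assume. A hedged decode of 0x54000030, with bit positions taken from the Arm ARM rather than anything stated in the patch:

    /* 0x54000030 in the B.cond/BC.cond encoding space:
     *   bits [31:24] = 0x54    conditional-branch opcode
     *   bits [23:5]  = 1       imm19, branch offset 1 * 4 = +4 bytes
     *   bit  [4]     = 1       "consistent" hint: BC.cond, not B.cond
     *   bits [3:0]   = 0b0000  cond = EQ
     * The preceding cmp xzr, xzr makes EQ true, so on FEAT_HBC hardware
     * the branch simply lands on the next instruction; without FEAT_HBC,
     * bit 4 set is an unallocated encoding and raises the SIGILL the
     * test expects.
     */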
@@ -386,6 +393,14 @@ static const struct hwcap_data {
                .hwcap_bit = HWCAP2_SVE_EBF16,
                .cpuinfo = "sveebf16",
        },
+       {
+               .name = "HBC",
+               .at_hwcap = AT_HWCAP2,
+               .hwcap_bit = HWCAP2_HBC,
+               .cpuinfo = "hbc",
+               .sigill_fn = hbc_sigill,
+               .sigill_reliable = true,
+       },
 };
 
 static bool seen_sigill;