Commit 55adc08d authored by Mark Brown, committed by Catalin Marinas

arm64/sysreg: Add _EL1 into ID_AA64PFR0_EL1 definition names

Normally we include the full register name in the defines for fields within
registers, but this convention has not been followed for the ID registers. In
preparation for automatic generation of the defines, add the _EL1s into the
defines for ID_AA64PFR0_EL1 to follow the convention. No functional changes.
Signed-off-by: Mark Brown <broonie@kernel.org>
Reviewed-by: Kristina Martsenko <kristina.martsenko@arm.com>
Link: https://lore.kernel.org/r/20220905225425.1871461-7-broonie@kernel.org
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent a957c6be
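For readers skimming the diff: the rename only changes how call sites spell the field defines. A minimal sketch (illustrative only, not part of the commit) of the pattern repeated throughout the hunks below; read_sanitised_ftr_reg() and cpuid_feature_extract_unsigned_field() are the existing arm64 helpers seen at the call sites:

	/* Read the system-wide sanitised copy of ID_AA64PFR0_EL1 and pull
	 * out one 4-bit field; the define now carries the full register
	 * name, e.g. ID_AA64PFR0_EL1_SVE_SHIFT rather than the old
	 * ID_AA64PFR0_SVE_SHIFT. */
	u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
	unsigned int sve = cpuid_feature_extract_unsigned_field(pfr0,
						ID_AA64PFR0_EL1_SVE_SHIFT);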
@@ -524,7 +524,7 @@ alternative_endif
  */
 .macro reset_amuserenr_el0, tmpreg
 	mrs	\tmpreg, id_aa64pfr0_el1	// Check ID_AA64PFR0_EL1
-	ubfx	\tmpreg, \tmpreg, #ID_AA64PFR0_AMU_SHIFT, #4
+	ubfx	\tmpreg, \tmpreg, #ID_AA64PFR0_EL1_AMU_SHIFT, #4
 	cbz	\tmpreg, .Lskip_\@		// Skip if no AMU present
 	msr_s	SYS_AMUSERENR_EL0, xzr		// Disable AMU access from EL0
 .Lskip_\@:
@@ -603,21 +603,21 @@ static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0)
 static inline bool id_aa64pfr0_32bit_el1(u64 pfr0)
 {
-	u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_SHIFT);
+	u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_EL1_SHIFT);
 
-	return val == ID_AA64PFR0_ELx_32BIT_64BIT;
+	return val == ID_AA64PFR0_EL1_ELx_32BIT_64BIT;
 }
 
 static inline bool id_aa64pfr0_32bit_el0(u64 pfr0)
 {
-	u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL0_SHIFT);
+	u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_EL0_SHIFT);
 
-	return val == ID_AA64PFR0_ELx_32BIT_64BIT;
+	return val == ID_AA64PFR0_EL1_ELx_32BIT_64BIT;
 }
 
 static inline bool id_aa64pfr0_sve(u64 pfr0)
 {
-	u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_SVE_SHIFT);
+	u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_SVE_SHIFT);
 
 	return val > 0;
 }
@@ -659,7 +659,7 @@ static inline bool supports_csv2p3(int scope)
 	pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
 	csv2_val = cpuid_feature_extract_unsigned_field(pfr0,
-						ID_AA64PFR0_CSV2_SHIFT);
+						ID_AA64PFR0_EL1_CSV2_SHIFT);
 	return csv2_val == 3;
 }
@@ -97,7 +97,7 @@
 /* GICv3 system register access */
 .macro __init_el2_gicv3
 	mrs	x0, id_aa64pfr0_el1
-	ubfx	x0, x0, #ID_AA64PFR0_GIC_SHIFT, #4
+	ubfx	x0, x0, #ID_AA64PFR0_EL1_GIC_SHIFT, #4
 	cbz	x0, .Lskip_gicv3_\@
 	mrs_s	x0, SYS_ICC_SRE_EL2
@@ -162,7 +162,7 @@
 	msr_s	SYS_HFGITR_EL2, xzr
 	mrs	x1, id_aa64pfr0_el1		// AMU traps UNDEF without AMU
-	ubfx	x1, x1, #ID_AA64PFR0_AMU_SHIFT, #4
+	ubfx	x1, x1, #ID_AA64PFR0_EL1_AMU_SHIFT, #4
 	cbz	x1, .Lskip_fgt_\@
 	msr_s	SYS_HAFGRTR_EL2, xzr
@@ -686,32 +686,32 @@
 #define MAIR_ATTRIDX(attr, idx)		((attr) << ((idx) * 8))
 
 /* id_aa64pfr0 */
-#define ID_AA64PFR0_CSV3_SHIFT		60
-#define ID_AA64PFR0_CSV2_SHIFT		56
-#define ID_AA64PFR0_DIT_SHIFT		48
-#define ID_AA64PFR0_AMU_SHIFT		44
-#define ID_AA64PFR0_MPAM_SHIFT		40
-#define ID_AA64PFR0_SEL2_SHIFT		36
-#define ID_AA64PFR0_SVE_SHIFT		32
-#define ID_AA64PFR0_RAS_SHIFT		28
-#define ID_AA64PFR0_GIC_SHIFT		24
-#define ID_AA64PFR0_ASIMD_SHIFT		20
-#define ID_AA64PFR0_FP_SHIFT		16
-#define ID_AA64PFR0_EL3_SHIFT		12
-#define ID_AA64PFR0_EL2_SHIFT		8
-#define ID_AA64PFR0_EL1_SHIFT		4
-#define ID_AA64PFR0_EL0_SHIFT		0
-#define ID_AA64PFR0_AMU			0x1
-#define ID_AA64PFR0_SVE			0x1
-#define ID_AA64PFR0_RAS_V1		0x1
-#define ID_AA64PFR0_RAS_V1P1		0x2
-#define ID_AA64PFR0_FP_NI		0xf
-#define ID_AA64PFR0_FP_SUPPORTED	0x0
-#define ID_AA64PFR0_ASIMD_NI		0xf
-#define ID_AA64PFR0_ASIMD_SUPPORTED	0x0
-#define ID_AA64PFR0_ELx_64BIT_ONLY	0x1
-#define ID_AA64PFR0_ELx_32BIT_64BIT	0x2
+#define ID_AA64PFR0_EL1_CSV3_SHIFT	60
+#define ID_AA64PFR0_EL1_CSV2_SHIFT	56
+#define ID_AA64PFR0_EL1_DIT_SHIFT	48
+#define ID_AA64PFR0_EL1_AMU_SHIFT	44
+#define ID_AA64PFR0_EL1_MPAM_SHIFT	40
+#define ID_AA64PFR0_EL1_SEL2_SHIFT	36
+#define ID_AA64PFR0_EL1_SVE_SHIFT	32
+#define ID_AA64PFR0_EL1_RAS_SHIFT	28
+#define ID_AA64PFR0_EL1_GIC_SHIFT	24
+#define ID_AA64PFR0_EL1_ASIMD_SHIFT	20
+#define ID_AA64PFR0_EL1_FP_SHIFT	16
+#define ID_AA64PFR0_EL1_EL3_SHIFT	12
+#define ID_AA64PFR0_EL1_EL2_SHIFT	8
+#define ID_AA64PFR0_EL1_EL1_SHIFT	4
+#define ID_AA64PFR0_EL1_EL0_SHIFT	0
+#define ID_AA64PFR0_EL1_AMU		0x1
+#define ID_AA64PFR0_EL1_SVE		0x1
+#define ID_AA64PFR0_EL1_RAS_V1		0x1
+#define ID_AA64PFR0_EL1_RAS_V1P1	0x2
+#define ID_AA64PFR0_EL1_FP_NI		0xf
+#define ID_AA64PFR0_EL1_FP_SUPPORTED	0x0
+#define ID_AA64PFR0_EL1_ASIMD_NI	0xf
+#define ID_AA64PFR0_EL1_ASIMD_SUPPORTED	0x0
+#define ID_AA64PFR0_EL1_ELx_64BIT_ONLY	0x1
+#define ID_AA64PFR0_EL1_ELx_32BIT_64BIT	0x2
 
 /* id_aa64pfr1 */
 #define ID_AA64PFR1_SME_SHIFT		24
@@ -243,22 +243,22 @@ static const struct arm64_ftr_bits ftr_id_aa64isar2[] = {
 };
 
 static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_DIT_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_AMU_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_MPAM_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SEL2_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_CSV3_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_CSV2_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_DIT_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_AMU_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_MPAM_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SEL2_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
-		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0),
+		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SVE_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_RAS_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0),
-	S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
-	S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL3_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL2_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_ELx_64BIT_ONLY),
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_ELx_64BIT_ONLY),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_RAS_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_GIC_SHIFT, 4, 0),
+	S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_ASIMD_SHIFT, 4, ID_AA64PFR0_EL1_ASIMD_NI),
+	S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_FP_SHIFT, 4, ID_AA64PFR0_EL1_FP_NI),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_EL3_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_EL2_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_EL1_SHIFT, 4, ID_AA64PFR0_EL1_ELx_64BIT_ONLY),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_EL0_SHIFT, 4, ID_AA64PFR0_EL1_ELx_64BIT_ONLY),
 	ARM64_FTR_END,
 };
@@ -1492,7 +1492,7 @@ static bool has_no_fpsimd(const struct arm64_cpu_capabilities *entry, int __unus
 	u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
 
 	return cpuid_feature_extract_signed_field(pfr0,
-					ID_AA64PFR0_FP_SHIFT) < 0;
+					ID_AA64PFR0_EL1_FP_SHIFT) < 0;
 }
 
 static bool has_cache_idc(const struct arm64_cpu_capabilities *entry,
@@ -2093,7 +2093,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
 		.matches = has_useable_gicv3_cpuif,
 		.sys_reg = SYS_ID_AA64PFR0_EL1,
-		.field_pos = ID_AA64PFR0_GIC_SHIFT,
+		.field_pos = ID_AA64PFR0_EL1_GIC_SHIFT,
 		.field_width = 4,
 		.sign = FTR_UNSIGNED,
 		.min_field_value = 1,
@@ -2168,9 +2168,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.matches = has_32bit_el0,
 		.sys_reg = SYS_ID_AA64PFR0_EL1,
 		.sign = FTR_UNSIGNED,
-		.field_pos = ID_AA64PFR0_EL0_SHIFT,
+		.field_pos = ID_AA64PFR0_EL1_EL0_SHIFT,
 		.field_width = 4,
-		.min_field_value = ID_AA64PFR0_ELx_32BIT_64BIT,
+		.min_field_value = ID_AA64PFR0_EL1_ELx_32BIT_64BIT,
 	},
 #ifdef CONFIG_KVM
 	{
@@ -2180,9 +2180,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.matches = has_cpuid_feature,
 		.sys_reg = SYS_ID_AA64PFR0_EL1,
 		.sign = FTR_UNSIGNED,
-		.field_pos = ID_AA64PFR0_EL1_SHIFT,
+		.field_pos = ID_AA64PFR0_EL1_EL1_SHIFT,
 		.field_width = 4,
-		.min_field_value = ID_AA64PFR0_ELx_32BIT_64BIT,
+		.min_field_value = ID_AA64PFR0_EL1_ELx_32BIT_64BIT,
 	},
 	{
 		.desc = "Protected KVM",
@@ -2201,7 +2201,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 	 * more details.
 	 */
 		.sys_reg = SYS_ID_AA64PFR0_EL1,
-		.field_pos = ID_AA64PFR0_CSV3_SHIFT,
+		.field_pos = ID_AA64PFR0_EL1_CSV3_SHIFT,
 		.field_width = 4,
 		.min_field_value = 1,
 		.matches = unmap_kernel_at_el0,
@@ -2244,9 +2244,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.capability = ARM64_SVE,
 		.sys_reg = SYS_ID_AA64PFR0_EL1,
 		.sign = FTR_UNSIGNED,
-		.field_pos = ID_AA64PFR0_SVE_SHIFT,
+		.field_pos = ID_AA64PFR0_EL1_SVE_SHIFT,
 		.field_width = 4,
-		.min_field_value = ID_AA64PFR0_SVE,
+		.min_field_value = ID_AA64PFR0_EL1_SVE,
 		.matches = has_cpuid_feature,
 		.cpu_enable = sve_kernel_enable,
 	},
@@ -2259,9 +2259,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.matches = has_cpuid_feature,
 		.sys_reg = SYS_ID_AA64PFR0_EL1,
 		.sign = FTR_UNSIGNED,
-		.field_pos = ID_AA64PFR0_RAS_SHIFT,
+		.field_pos = ID_AA64PFR0_EL1_RAS_SHIFT,
 		.field_width = 4,
-		.min_field_value = ID_AA64PFR0_RAS_V1,
+		.min_field_value = ID_AA64PFR0_EL1_RAS_V1,
 		.cpu_enable = cpu_clear_disr,
 	},
 #endif /* CONFIG_ARM64_RAS_EXTN */
@@ -2278,9 +2278,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.matches = has_amu,
 		.sys_reg = SYS_ID_AA64PFR0_EL1,
 		.sign = FTR_UNSIGNED,
-		.field_pos = ID_AA64PFR0_AMU_SHIFT,
+		.field_pos = ID_AA64PFR0_EL1_AMU_SHIFT,
 		.field_width = 4,
-		.min_field_value = ID_AA64PFR0_AMU,
+		.min_field_value = ID_AA64PFR0_EL1_AMU,
 		.cpu_enable = cpu_amu_enable,
 	},
 #endif /* CONFIG_ARM64_AMU_EXTN */
@@ -2485,7 +2485,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
 		.matches = can_use_gic_priorities,
 		.sys_reg = SYS_ID_AA64PFR0_EL1,
-		.field_pos = ID_AA64PFR0_GIC_SHIFT,
+		.field_pos = ID_AA64PFR0_EL1_GIC_SHIFT,
 		.field_width = 4,
 		.sign = FTR_UNSIGNED,
 		.min_field_value = 1,
@@ -2708,11 +2708,11 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
 	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_TS_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FLAGM),
 	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_TS_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_FLAGM2),
 	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_RNDR_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RNG),
-	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, 4, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_FP),
-	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FPHP),
-	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, 4, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_ASIMD),
-	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDHP),
-	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_DIT_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DIT),
+	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_FP_SHIFT, 4, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_FP),
+	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_FP_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FPHP),
+	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_ASIMD_SHIFT, 4, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_ASIMD),
+	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_ASIMD_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDHP),
+	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_DIT_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DIT),
 	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_DPB_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DCPOP),
 	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_DPB_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_DCPODP),
 	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_JSCVT_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_JSCVT),
@@ -2727,7 +2727,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
 	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_I8MM_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_I8MM),
 	HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_EL1_AT_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_USCAT),
 #ifdef CONFIG_ARM64_SVE
-	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, KERNEL_HWCAP_SVE),
+	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_SVE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR0_EL1_SVE, CAP_HWCAP, KERNEL_HWCAP_SVE),
 	HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_SVEver_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_SVEver_SVE2, CAP_HWCAP, KERNEL_HWCAP_SVE2),
 	HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_AES_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_AES_IMP, CAP_HWCAP, KERNEL_HWCAP_SVEAES),
 	HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_AES_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_AES_PMULL128, CAP_HWCAP, KERNEL_HWCAP_SVEPMULL),
@@ -98,7 +98,7 @@ SYM_CODE_START_LOCAL(elx_sync)
 SYM_CODE_END(elx_sync)
 
 SYM_CODE_START_LOCAL(__finalise_el2)
-	check_override id_aa64pfr0 ID_AA64PFR0_SVE_SHIFT .Linit_sve .Lskip_sve
+	check_override id_aa64pfr0 ID_AA64PFR0_EL1_SVE_SHIFT .Linit_sve .Lskip_sve
 
 .Linit_sve:	/* SVE register access */
 	mrs	x0, cptr_el2			// Disable SVE traps
@@ -74,7 +74,7 @@ static const struct ftr_set_desc pfr0 __initconst = {
 	.name		= "id_aa64pfr0",
 	.override	= &id_aa64pfr0_override,
 	.fields		= {
-		FIELD("sve", ID_AA64PFR0_SVE_SHIFT, pfr0_sve_filter),
+		FIELD("sve", ID_AA64PFR0_EL1_SVE_SHIFT, pfr0_sve_filter),
 		{}
 	},
 };
@@ -168,7 +168,7 @@ static enum mitigation_state spectre_v2_get_cpu_hw_mitigation_state(void)
 	/* If the CPU has CSV2 set, we're safe */
 	pfr0 = read_cpuid(ID_AA64PFR0_EL1);
-	if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
+	if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_CSV2_SHIFT))
 		return SPECTRE_UNAFFECTED;
 
 	/* Alternatively, we have a list of unaffected CPUs */
@@ -35,9 +35,9 @@
  * - Data Independent Timing
  */
 #define PVM_ID_AA64PFR0_ALLOW (\
-	ARM64_FEATURE_MASK(ID_AA64PFR0_FP) | \
-	ARM64_FEATURE_MASK(ID_AA64PFR0_ASIMD) | \
-	ARM64_FEATURE_MASK(ID_AA64PFR0_DIT) \
+	ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_FP) | \
+	ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_ASIMD) | \
+	ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_DIT) \
 	)
 
 /*
@@ -49,11 +49,11 @@
  * Supported by KVM
  */
 #define PVM_ID_AA64PFR0_RESTRICT_UNSIGNED (\
-	FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL0), ID_AA64PFR0_ELx_64BIT_ONLY) | \
-	FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1), ID_AA64PFR0_ELx_64BIT_ONLY) | \
-	FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL2), ID_AA64PFR0_ELx_64BIT_ONLY) | \
-	FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL3), ID_AA64PFR0_ELx_64BIT_ONLY) | \
-	FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_RAS), ID_AA64PFR0_RAS_V1) \
+	FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0), ID_AA64PFR0_EL1_ELx_64BIT_ONLY) | \
+	FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL1), ID_AA64PFR0_EL1_ELx_64BIT_ONLY) | \
+	FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL2), ID_AA64PFR0_EL1_ELx_64BIT_ONLY) | \
+	FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL3), ID_AA64PFR0_EL1_ELx_64BIT_ONLY) | \
+	FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_RAS), ID_AA64PFR0_EL1_RAS_V1) \
 	)
 
 /*
@@ -20,35 +20,35 @@ static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu)
 	u64 cptr_set = 0;
 
 	/* Protected KVM does not support AArch32 guests. */
-	BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL0),
-		PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_ELx_64BIT_ONLY);
-	BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1),
-		PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_ELx_64BIT_ONLY);
+	BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0),
+		PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_EL1_ELx_64BIT_ONLY);
+	BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL1),
+		PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_EL1_ELx_64BIT_ONLY);
 
 	/*
 	 * Linux guests assume support for floating-point and Advanced SIMD. Do
 	 * not change the trapping behavior for these from the KVM default.
 	 */
-	BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_FP),
+	BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_FP),
 			PVM_ID_AA64PFR0_ALLOW));
-	BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_ASIMD),
+	BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_ASIMD),
 			PVM_ID_AA64PFR0_ALLOW));
 
 	/* Trap RAS unless all current versions are supported */
-	if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_RAS), feature_ids) <
-	    ID_AA64PFR0_RAS_V1P1) {
+	if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_RAS), feature_ids) <
+	    ID_AA64PFR0_EL1_RAS_V1P1) {
 		hcr_set |= HCR_TERR | HCR_TEA;
 		hcr_clear |= HCR_FIEN;
 	}
 
 	/* Trap AMU */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_AMU), feature_ids)) {
+	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU), feature_ids)) {
 		hcr_clear |= HCR_AMVOFFEN;
 		cptr_set |= CPTR_EL2_TAM;
 	}
 
 	/* Trap SVE */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_SVE), feature_ids))
+	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), feature_ids))
 		cptr_set |= CPTR_EL2_TZ;
 
 	vcpu->arch.hcr_el2 |= hcr_set;
@@ -92,9 +92,9 @@ static u64 get_pvm_id_aa64pfr0(const struct kvm_vcpu *vcpu)
 			PVM_ID_AA64PFR0_RESTRICT_UNSIGNED);
 
 	/* Spectre and Meltdown mitigation in KVM */
-	set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV2),
+	set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2),
 			       (u64)kvm->arch.pfr0_csv2);
-	set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3),
+	set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3),
 			       (u64)kvm->arch.pfr0_csv3);
 
 	return (id_aa64pfr0_el1_sys_val & allow_mask) | set_mask;
@@ -281,8 +281,8 @@ static bool pvm_access_id_aarch32(struct kvm_vcpu *vcpu,
 	 * No support for AArch32 guests, therefore, pKVM has no sanitized copy
 	 * of AArch32 feature id registers.
 	 */
-	BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1),
-		     PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) > ID_AA64PFR0_ELx_64BIT_ONLY);
+	BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL1),
+		     PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) > ID_AA64PFR0_EL1_ELx_64BIT_ONLY);
 
 	return pvm_access_raz_wi(vcpu, p, r);
 }
@@ -1077,15 +1077,15 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
 	switch (id) {
 	case SYS_ID_AA64PFR0_EL1:
 		if (!vcpu_has_sve(vcpu))
-			val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_SVE);
-		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_AMU);
-		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_CSV2);
-		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV2), (u64)vcpu->kvm->arch.pfr0_csv2);
-		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3);
-		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3), (u64)vcpu->kvm->arch.pfr0_csv3);
+			val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE);
+		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU);
+		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2);
+		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2), (u64)vcpu->kvm->arch.pfr0_csv2);
+		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3);
+		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3), (u64)vcpu->kvm->arch.pfr0_csv3);
 		if (kvm_vgic_global_state.type == VGIC_V3) {
-			val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_GIC);
-			val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_GIC), 1);
+			val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC);
+			val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC), 1);
 		}
 		break;
 	case SYS_ID_AA64PFR1_EL1:
@@ -1196,21 +1196,21 @@ static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
 	 * it doesn't promise more than what is actually provided (the
 	 * guest could otherwise be covered in ectoplasmic residue).
 	 */
-	csv2 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_CSV2_SHIFT);
+	csv2 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_EL1_CSV2_SHIFT);
 	if (csv2 > 1 ||
 	    (csv2 && arm64_get_spectre_v2_state() != SPECTRE_UNAFFECTED))
 		return -EINVAL;
 
 	/* Same thing for CSV3 */
-	csv3 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_CSV3_SHIFT);
+	csv3 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_EL1_CSV3_SHIFT);
 	if (csv3 > 1 ||
 	    (csv3 && arm64_get_meltdown_state() != SPECTRE_UNAFFECTED))
 		return -EINVAL;
 
 	/* We can only differ with CSV[23], and anything else is an error */
 	val ^= read_id_reg(vcpu, rd, false);
-	val &= ~((0xFUL << ID_AA64PFR0_CSV2_SHIFT) |
-		 (0xFUL << ID_AA64PFR0_CSV3_SHIFT));
+	val &= ~((0xFUL << ID_AA64PFR0_EL1_CSV2_SHIFT) |
+		 (0xFUL << ID_AA64PFR0_EL1_CSV3_SHIFT));
 	if (val)
 		return -EINVAL;
@@ -1825,7 +1825,7 @@ static bool trap_dbgdidr(struct kvm_vcpu *vcpu,
 	} else {
 		u64 dfr = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
 		u64 pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
-		u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL3_SHIFT);
+		u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL1_EL3_SHIFT);
 
 		p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
 			     (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
@@ -94,7 +94,7 @@ bool gic_cpuif_has_vsgi(void)
 {
 	unsigned long fld, reg = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
 
-	fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64PFR0_GIC_SHIFT);
+	fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64PFR0_EL1_GIC_SHIFT);
 
 	return fld >= 0x3;
 }
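The KVM hunks above compose the renamed defines with ARM64_FEATURE_MASK() and FIELD_GET()/FIELD_PREP() from <linux/bitfield.h>. A minimal sketch of that pattern, assuming (as the call sites imply) that ARM64_FEATURE_MASK(x) pastes _SHIFT onto its argument to form the 4-bit field mask; the helper name pfr0_has_ras_v1p1() is hypothetical and only for illustration:

	#include <linux/bitfield.h>

	/* Illustrative only: true when the RAS field of a sanitised
	 * ID_AA64PFR0_EL1 value advertises at least RASv1p1. */
	static bool pfr0_has_ras_v1p1(u64 pfr0)
	{
		return FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_RAS), pfr0) >=
		       ID_AA64PFR0_EL1_RAS_V1P1;
	}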