Commit a69283ae authored by Fuad Tabba, committed by Marc Zyngier

KVM: arm64: Refactor CPACR trap bit setting/clearing to use ELx format

When setting/clearing CPACR bits for EL0 and EL1, use the ELx
format of the bits, which covers both. This makes the code
clearer, and reduces the chances of accidentally missing a bit.

No functional change intended.
Reviewed-by: Oliver Upton <oliver.upton@linux.dev>
Signed-off-by: Fuad Tabba <tabba@google.com>
Link: https://lore.kernel.org/r/20240603122852.3923848-9-tabba@google.com
Signed-off-by: Marc Zyngier <maz@kernel.org>
parent 1696fc21
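
For context, the CPACR_ELx_* masks this patch switches to are aggregates of the per-EL enable bits, so a single set or clear covers EL0 and EL1 together. A minimal sketch of those definitions (the in-tree names the diff relies on, reproduced from memory rather than verbatim kernel source):

	/* Each ELx mask is the OR of the EL0 and EL1 enable bits. */
	#define CPACR_ELx_FPEN	(CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN)
	#define CPACR_ELx_ZEN	(CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN)
	#define CPACR_ELx_SMEN	(CPACR_EL1_SMEN_EL0EN | CPACR_EL1_SMEN_EL1EN)

Because each mask expands to exactly the two bits it replaces, every substitution below is bit-for-bit identical to the old code, which is why no functional change is intended.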
@@ -146,7 +146,7 @@
 /* Coprocessor traps */
 .macro __init_el2_cptr
 	__check_hvhe .LnVHE_\@, x1
-	mov	x0, #(CPACR_EL1_FPEN_EL1EN | CPACR_EL1_FPEN_EL0EN)
+	mov	x0, #CPACR_ELx_FPEN
 	msr	cpacr_el1, x0
 	b	.Lskip_set_cptr_\@
 .LnVHE_\@:
@@ -277,7 +277,7 @@
 	// (h)VHE case
 	mrs	x0, cpacr_el1			// Disable SVE traps
-	orr	x0, x0, #(CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN)
+	orr	x0, x0, #CPACR_ELx_ZEN
 	msr	cpacr_el1, x0
 	b	.Lskip_set_cptr_\@
@@ -298,7 +298,7 @@
 	// (h)VHE case
 	mrs	x0, cpacr_el1			// Disable SME traps
-	orr	x0, x0, #(CPACR_EL1_SMEN_EL0EN | CPACR_EL1_SMEN_EL1EN)
+	orr	x0, x0, #CPACR_ELx_SMEN
 	msr	cpacr_el1, x0
 	b	.Lskip_set_cptr_sme_\@
@@ -632,17 +632,16 @@ static __always_inline u64 kvm_get_reset_cptr_el2(struct kvm_vcpu *vcpu)
 	u64 val;
 
 	if (has_vhe()) {
-		val = (CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN |
-		       CPACR_EL1_ZEN_EL1EN);
+		val = (CPACR_ELx_FPEN | CPACR_EL1_ZEN_EL1EN);
 		if (cpus_have_final_cap(ARM64_SME))
 			val |= CPACR_EL1_SMEN_EL1EN;
 	} else if (has_hvhe()) {
-		val = (CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN);
+		val = CPACR_ELx_FPEN;
 		if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs())
-			val |= CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN;
+			val |= CPACR_ELx_ZEN;
 		if (cpus_have_final_cap(ARM64_SME))
-			val |= CPACR_EL1_SMEN_EL1EN | CPACR_EL1_SMEN_EL0EN;
+			val |= CPACR_ELx_SMEN;
 	} else {
 		val = CPTR_NVHE_EL2_RES1;
@@ -161,9 +161,7 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
 	if (has_vhe() && system_supports_sme()) {
 		/* Also restore EL0 state seen on entry */
 		if (vcpu_get_flag(vcpu, HOST_SME_ENABLED))
-			sysreg_clear_set(CPACR_EL1, 0,
-					 CPACR_EL1_SMEN_EL0EN |
-					 CPACR_EL1_SMEN_EL1EN);
+			sysreg_clear_set(CPACR_EL1, 0, CPACR_ELx_SMEN);
 		else
 			sysreg_clear_set(CPACR_EL1,
 					 CPACR_EL1_SMEN_EL0EN,
@@ -65,7 +65,7 @@ static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu)
 	/* Trap SVE */
 	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), feature_ids)) {
 		if (has_hvhe())
-			cptr_clear |= CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN;
+			cptr_clear |= CPACR_ELx_ZEN;
 		else
 			cptr_set |= CPTR_EL2_TZ;
 	}
@@ -48,15 +48,14 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
 	val |= has_hvhe() ? CPACR_EL1_TTA : CPTR_EL2_TTA;
 	if (cpus_have_final_cap(ARM64_SME)) {
 		if (has_hvhe())
-			val &= ~(CPACR_EL1_SMEN_EL1EN | CPACR_EL1_SMEN_EL0EN);
+			val &= ~CPACR_ELx_SMEN;
 		else
 			val |= CPTR_EL2_TSM;
 	}
 
 	if (!guest_owns_fp_regs()) {
 		if (has_hvhe())
-			val &= ~(CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN |
-				 CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN);
+			val &= ~(CPACR_ELx_FPEN | CPACR_ELx_ZEN);
 		else
 			val |= CPTR_EL2_TFP | CPTR_EL2_TZ;
@@ -93,8 +93,7 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
 	val = read_sysreg(cpacr_el1);
 	val |= CPACR_ELx_TTA;
-	val &= ~(CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN |
-		 CPACR_EL1_SMEN_EL0EN | CPACR_EL1_SMEN_EL1EN);
+	val &= ~(CPACR_ELx_ZEN | CPACR_ELx_SMEN);
 
 	/*
 	 * With VHE (HCR.E2H == 1), accesses to CPACR_EL1 are routed to
@@ -109,9 +108,9 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
 	if (guest_owns_fp_regs()) {
 		if (vcpu_has_sve(vcpu))
-			val |= CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN;
+			val |= CPACR_ELx_ZEN;
 	} else {
-		val &= ~(CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN);
+		val &= ~CPACR_ELx_FPEN;
 		__activate_traps_fpsimd32(vcpu);
 	}
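
For reference, sysreg_clear_set(), used in the kvm_arch_vcpu_put_fp() hunk above, is the existing arm64 read-modify-write helper. Roughly (a paraphrase of its behavior, not verbatim kernel source):

	u64 old = read_sysreg(cpacr_el1);
	u64 new = (old & ~clear) | set;	/* clear bits first, then set */
	if (new != old)			/* skip the write if nothing changed */
		write_sysreg(new, cpacr_el1);

So sysreg_clear_set(CPACR_EL1, 0, CPACR_ELx_SMEN) enables SME accesses at EL0 and EL1 in one call, exactly what OR-ing the two CPACR_EL1_SMEN_* bits did before.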