Commit b1ea1d76 authored by Marc Zyngier

KVM: arm64: Map AArch32 cp15 register to AArch64 sysregs

Move all the cp15 registers over to their AArch64 counterpart.
This requires the annotation of a few of them (such as the usual
DFAR/IFAR vs FAR_EL1), and a new helper that generates mask/shift
pairs for the various configurations.
Signed-off-by: Marc Zyngier <maz@kernel.org>
parent 6ed6750f
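
A note on how the new mapping works: each AArch32 cp15 register is now backed by an AArch64 sysreg, and the AA32(LO)/AA32(HI) annotation selects which 32-bit half of that sysreg a cp15 access touches. Below is a minimal, standalone userspace sketch of the mask/shift idea; it mirrors the helper added in this patch, but merge_write(), the AA32_DIRECT enumerator and the plain integer masks are illustrative stand-ins, not kernel code.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the kernel's AA32 mapping annotations. */
enum aa32_map { AA32_DIRECT, AA32_LO, AA32_HI };

/* Mirror of the mask/shift selection: LO covers bits [31:0], HI covers
 * bits [63:32], anything else maps onto the full 64-bit register. */
static void get_access_mask(enum aa32_map map, uint64_t *mask, unsigned int *shift)
{
	switch (map) {
	case AA32_LO:
		*mask = 0xffffffffULL;
		*shift = 0;
		break;
	case AA32_HI:
		*mask = 0xffffffffULL << 32;
		*shift = 32;
		break;
	default:
		*mask = ~0ULL;
		*shift = 0;
		break;
	}
}

/* Merge a guest write into the 64-bit sysreg image: a half-register
 * mapping preserves the untouched half, a full-width mapping simply
 * replaces the whole value. */
static uint64_t merge_write(uint64_t old, uint64_t regval, enum aa32_map map)
{
	uint64_t mask, val;
	unsigned int shift;

	get_access_mask(map, &mask, &shift);

	if (~mask)		/* partial mapping: keep the other half */
		val = old & ~mask;
	else			/* full-width mapping: start from scratch */
		val = 0;

	return val | ((regval & (mask >> shift)) << shift);
}

int main(void)
{
	uint64_t far = 0x1111111122222222ULL;

	/* A DFAR write lands in FAR_EL1[31:0], an IFAR write in FAR_EL1[63:32]. */
	far = merge_write(far, 0xdeadbeef, AA32_LO);
	far = merge_write(far, 0xcafef00d, AA32_HI);
	printf("FAR_EL1 = %#llx\n", (unsigned long long)far);	/* 0xcafef00ddeadbeef */
	return 0;
}
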
@@ -128,6 +128,24 @@ static bool access_dcsw(struct kvm_vcpu *vcpu,
 	return true;
 }
 
+static void get_access_mask(const struct sys_reg_desc *r, u64 *mask, u64 *shift)
+{
+	switch (r->aarch32_map) {
+	case AA32_LO:
+		*mask = GENMASK_ULL(31, 0);
+		*shift = 0;
+		break;
+	case AA32_HI:
+		*mask = GENMASK_ULL(63, 32);
+		*shift = 32;
+		break;
+	default:
+		*mask = GENMASK_ULL(63, 0);
+		*shift = 0;
+		break;
+	}
+}
+
 /*
  * Generic accessor for VM registers. Only called as long as HCR_TVM
  * is set. If the guest enables the MMU, we stop trapping the VM
@@ -138,26 +156,21 @@ static bool access_vm_reg(struct kvm_vcpu *vcpu,
 			  const struct sys_reg_desc *r)
 {
 	bool was_enabled = vcpu_has_cache_enabled(vcpu);
-	u64 val;
-	int reg = r->reg;
+	u64 val, mask, shift;
 
 	BUG_ON(!p->is_write);
 
-	/* See the 32bit mapping in kvm_host.h */
-	if (p->is_aarch32)
-		reg = r->reg / 2;
+	get_access_mask(r, &mask, &shift);
 
-	if (!p->is_aarch32 || !p->is_32bit) {
-		val = p->regval;
+	if (~mask) {
+		val = vcpu_read_sys_reg(vcpu, r->reg);
+		val &= ~mask;
 	} else {
-		val = vcpu_read_sys_reg(vcpu, reg);
-		if (r->reg % 2)
-			val = (p->regval << 32) | (u64)lower_32_bits(val);
-		else
-			val = ((u64)upper_32_bits(val) << 32) |
-				lower_32_bits(p->regval);
+		val = 0;
 	}
-	vcpu_write_sys_reg(vcpu, val, reg);
+
+	val |= (p->regval & (mask >> shift)) << shift;
+	vcpu_write_sys_reg(vcpu, val, r->reg);
 
 	kvm_toggle_cache(vcpu, was_enabled);
 	return true;
@@ -167,17 +180,13 @@ static bool access_actlr(struct kvm_vcpu *vcpu,
 			 struct sys_reg_params *p,
 			 const struct sys_reg_desc *r)
 {
+	u64 mask, shift;
+
 	if (p->is_write)
 		return ignore_write(vcpu, p);
 
-	p->regval = vcpu_read_sys_reg(vcpu, ACTLR_EL1);
-
-	if (p->is_aarch32) {
-		if (r->Op2 & 2)
-			p->regval = upper_32_bits(p->regval);
-		else
-			p->regval = lower_32_bits(p->regval);
-	}
+	get_access_mask(r, &mask, &shift);
+	p->regval = (vcpu_read_sys_reg(vcpu, r->reg) & mask) >> shift;
 
 	return true;
 }
@@ -1264,10 +1273,6 @@ static bool access_csselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 {
 	int reg = r->reg;
 
-	/* See the 32bit mapping in kvm_host.h */
-	if (p->is_aarch32)
-		reg = r->reg / 2;
-
 	if (p->is_write)
 		vcpu_write_sys_reg(vcpu, p->regval, reg);
 	else
@@ -1919,20 +1924,29 @@ static const struct sys_reg_desc cp14_64_regs[] = {
  */
 static const struct sys_reg_desc cp15_regs[] = {
 	{ Op1( 0), CRn( 0), CRm( 0), Op2( 1), access_ctr },
-	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR },
-	{ Op1( 0), CRn( 1), CRm( 0), Op2( 1), access_actlr },
-	{ Op1( 0), CRn( 1), CRm( 0), Op2( 3), access_actlr },
-	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
-	{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
-	{ Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },
-	{ Op1( 0), CRn( 2), CRm( 0), Op2( 3), access_vm_reg, NULL, c2_TTBCR2 },
-	{ Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR },
-	{ Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR },
-	{ Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR },
-	{ Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, c5_ADFSR },
-	{ Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, c5_AIFSR },
-	{ Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, c6_DFAR },
-	{ Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, c6_IFAR },
+	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, SCTLR_EL1 },
+	/* ACTLR */
+	{ AA32(LO), Op1( 0), CRn( 1), CRm( 0), Op2( 1), access_actlr, NULL, ACTLR_EL1 },
+	/* ACTLR2 */
+	{ AA32(HI), Op1( 0), CRn( 1), CRm( 0), Op2( 3), access_actlr, NULL, ACTLR_EL1 },
+	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 },
+	{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, TTBR1_EL1 },
+	/* TTBCR */
+	{ AA32(LO), Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, TCR_EL1 },
+	/* TTBCR2 */
+	{ AA32(HI), Op1( 0), CRn( 2), CRm( 0), Op2( 3), access_vm_reg, NULL, TCR_EL1 },
+	{ Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, DACR32_EL2 },
+	/* DFSR */
+	{ Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, ESR_EL1 },
+	{ Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, IFSR32_EL2 },
+	/* ADFSR */
+	{ Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, AFSR0_EL1 },
+	/* AIFSR */
+	{ Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, AFSR1_EL1 },
+	/* DFAR */
+	{ AA32(LO), Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, FAR_EL1 },
+	/* IFAR */
+	{ AA32(HI), Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, FAR_EL1 },
 
 	/*
 	 * DC{C,I,CI}SW operations:
@@ -1958,15 +1972,19 @@ static const struct sys_reg_desc cp15_regs[] = {
 	{ Op1( 0), CRn( 9), CRm(14), Op2( 2), access_pminten },
 	{ Op1( 0), CRn( 9), CRm(14), Op2( 3), access_pmovs },
 
-	{ Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR },
-	{ Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
-	{ Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 },
-	{ Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },
+	/* PRRR/MAIR0 */
+	{ AA32(LO), Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, MAIR_EL1 },
+	/* NMRR/MAIR1 */
+	{ AA32(HI), Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, MAIR_EL1 },
+	/* AMAIR0 */
+	{ AA32(LO), Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, AMAIR_EL1 },
+	/* AMAIR1 */
+	{ AA32(HI), Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, AMAIR_EL1 },
 
 	/* ICC_SRE */
 	{ Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre },
 
-	{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
+	{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, CONTEXTIDR_EL1 },
 
 	/* Arch Tmers */
 	{ SYS_DESC(SYS_AARCH32_CNTP_TVAL), access_arch_timer },
@@ -2041,14 +2059,14 @@ static const struct sys_reg_desc cp15_regs[] = {
 
 	{ Op1(1), CRn( 0), CRm( 0), Op2(0), access_ccsidr },
 	{ Op1(1), CRn( 0), CRm( 0), Op2(1), access_clidr },
-	{ Op1(2), CRn( 0), CRm( 0), Op2(0), access_csselr, NULL, c0_CSSELR },
+	{ Op1(2), CRn( 0), CRm( 0), Op2(0), access_csselr, NULL, CSSELR_EL1 },
 };
 
 static const struct sys_reg_desc cp15_64_regs[] = {
-	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
+	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 },
 	{ Op1( 0), CRn( 0), CRm( 9), Op2( 0), access_pmu_evcntr },
 	{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI1R */
-	{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
+	{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR1_EL1 },
 	{ Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_ASGI1R */
 	{ Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI0R */
 	{ SYS_DESC(SYS_AARCH32_CNTP_CVAL), access_arch_timer },
...
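
For the read direction the same mask/shift pair does the opposite job: the 32-bit guest sees only the half selected by the annotation, shifted down into bits [31:0]. A hedged sketch, reusing the illustrative enum and get_access_mask() stand-in from the sketch above (read_half() is again an invented name, not a kernel function):

/* Read side of the same mapping: return only the half selected by the
 * AA32 annotation, moved down into bits [31:0] for the 32-bit guest. */
static uint64_t read_half(uint64_t sysreg, enum aa32_map map)
{
	uint64_t mask;
	unsigned int shift;

	get_access_mask(map, &mask, &shift);
	return (sysreg & mask) >> shift;	/* e.g. ACTLR2 reads ACTLR_EL1[63:32] */
}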