Commit 1da42c34 authored by Marc Zyngier

KVM: arm64: Map AArch32 cp14 register to AArch64 sysregs

Similarly to what has been done on the cp15 front, repaint the
debug registers to use their AArch64 counterparts. This results
in some simplification as we can remove the 32bit-specific
accessors.
Signed-off-by: Marc Zyngier <maz@kernel.org>
parent b1ea1d76
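
The core idea of the patch: an AArch32 cp14 debug access no longer lands in a separate 32-bit copro[] array, it is folded into the 64-bit AArch64 register that backs it, using a per-descriptor mask and shift. The standalone sketch below illustrates only that encoding; the names and types are illustrative stand-ins, not the kernel's code. It assumes a "LO" view aliases bits [31:0] of the backing register and a "HI" view bits [63:32], matching how DBGBVRn/DBGBXVRn map onto DBGBVRn_EL1.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the mask/shift scheme: a 32-bit AArch32 view
 * is described by which bits of the 64-bit AArch64 register it aliases. */
struct reg_view {
        uint64_t mask;   /* bits of the backing register this view covers */
        unsigned shift;  /* position of the view inside the register */
};

static const struct reg_view view_lo = { 0x00000000ffffffffULL,  0 };
static const struct reg_view view_hi = { 0xffffffff00000000ULL, 32 };

/* Fold a guest write into the backing register, touching only the view's bits. */
static void view_write(uint64_t *reg, const struct reg_view *v, uint64_t val)
{
        *reg &= ~v->mask;
        *reg |= (val & (v->mask >> v->shift)) << v->shift;
}

/* Extract the view's bits for a guest read. */
static uint64_t view_read(const uint64_t *reg, const struct reg_view *v)
{
        return (*reg & v->mask) >> v->shift;
}

int main(void)
{
        uint64_t dbgbvr = 0;    /* stands in for dbg_bvr[n] */

        view_write(&dbgbvr, &view_lo, 0x1234abcd);  /* AArch32 DBGBVRn  */
        view_write(&dbgbvr, &view_hi, 0x00000042);  /* AArch32 DBGBXVRn */

        printf("backing reg = %#018llx\n", (unsigned long long)dbgbvr);
        printf("lo view     = %#010llx\n", (unsigned long long)view_read(&dbgbvr, &view_lo));
        printf("hi view     = %#010llx\n", (unsigned long long)view_read(&dbgbvr, &view_hi));
        return 0;
}

Running the sketch fills both halves of the backing register and reads each half back unchanged, which is the property the new reg_to_dbg()/dbg_to_reg() pair in the diff below preserves.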
@@ -555,14 +555,6 @@ static inline bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg)
 	return true;
 }
 
-/*
- * CP14 and CP15 live in the same array, as they are backed by the
- * same system registers.
- */
-#define CPx_BIAS	IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)
-
-#define vcpu_cp14(v,r)	((v)->arch.ctxt.copro[(r) ^ CPx_BIAS])
-
 struct kvm_vm_stat {
 	ulong remote_tlb_flush;
 };
...
@@ -366,26 +366,30 @@ static bool trap_debug_regs(struct kvm_vcpu *vcpu,
  */
 static void reg_to_dbg(struct kvm_vcpu *vcpu,
 		       struct sys_reg_params *p,
+		       const struct sys_reg_desc *rd,
 		       u64 *dbg_reg)
 {
-	u64 val = p->regval;
+	u64 mask, shift, val;
 
-	if (p->is_32bit) {
-		val &= 0xffffffffUL;
-		val |= ((*dbg_reg >> 32) << 32);
-	}
+	get_access_mask(rd, &mask, &shift);
 
+	val = *dbg_reg;
+	val &= ~mask;
+	val |= (p->regval & (mask >> shift)) << shift;
 	*dbg_reg = val;
+
 	vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
 }
 
 static void dbg_to_reg(struct kvm_vcpu *vcpu,
 		       struct sys_reg_params *p,
+		       const struct sys_reg_desc *rd,
 		       u64 *dbg_reg)
 {
-	p->regval = *dbg_reg;
-	if (p->is_32bit)
-		p->regval &= 0xffffffffUL;
+	u64 mask, shift;
+
+	get_access_mask(rd, &mask, &shift);
+	p->regval = (*dbg_reg & mask) >> shift;
 }
 
 static bool trap_bvr(struct kvm_vcpu *vcpu,
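
To make the new write path concrete (the values here are chosen purely for illustration, they are not taken from the patch): for an AArch32 DBGBXVRn access the descriptor selects the upper half of DBGBVRn_EL1, so get_access_mask() yields mask = 0xffffffff00000000 and shift = 32. With *dbg_reg = 0x000000001234abcd and p->regval = 0x42, reg_to_dbg() computes val = (*dbg_reg & ~mask) | ((p->regval & (mask >> shift)) << shift) = 0x000000421234abcd, so only bits [63:32] change and the DBGBVRn view in bits [31:0] stays untouched; dbg_to_reg() on the same descriptor reads back 0x42.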
@@ -395,9 +399,9 @@ static bool trap_bvr(struct kvm_vcpu *vcpu,
 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
 
 	if (p->is_write)
-		reg_to_dbg(vcpu, p, dbg_reg);
+		reg_to_dbg(vcpu, p, rd, dbg_reg);
 	else
-		dbg_to_reg(vcpu, p, dbg_reg);
+		dbg_to_reg(vcpu, p, rd, dbg_reg);
 
 	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
 
@@ -437,9 +441,9 @@ static bool trap_bcr(struct kvm_vcpu *vcpu,
 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
 
 	if (p->is_write)
-		reg_to_dbg(vcpu, p, dbg_reg);
+		reg_to_dbg(vcpu, p, rd, dbg_reg);
 	else
-		dbg_to_reg(vcpu, p, dbg_reg);
+		dbg_to_reg(vcpu, p, rd, dbg_reg);
 
 	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
 
@@ -480,9 +484,9 @@ static bool trap_wvr(struct kvm_vcpu *vcpu,
 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
 
 	if (p->is_write)
-		reg_to_dbg(vcpu, p, dbg_reg);
+		reg_to_dbg(vcpu, p, rd, dbg_reg);
 	else
-		dbg_to_reg(vcpu, p, dbg_reg);
+		dbg_to_reg(vcpu, p, rd, dbg_reg);
 
 	trace_trap_reg(__func__, rd->reg, p->is_write,
 		vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]);
@@ -523,9 +527,9 @@ static bool trap_wcr(struct kvm_vcpu *vcpu,
 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
 
 	if (p->is_write)
-		reg_to_dbg(vcpu, p, dbg_reg);
+		reg_to_dbg(vcpu, p, rd, dbg_reg);
 	else
-		dbg_to_reg(vcpu, p, dbg_reg);
+		dbg_to_reg(vcpu, p, rd, dbg_reg);
 
 	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
 
@@ -1744,66 +1748,27 @@ static bool trap_dbgidr(struct kvm_vcpu *vcpu,
 	}
 }
 
-static bool trap_debug32(struct kvm_vcpu *vcpu,
-			 struct sys_reg_params *p,
-			 const struct sys_reg_desc *r)
-{
-	if (p->is_write) {
-		vcpu_cp14(vcpu, r->reg) = p->regval;
-		vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
-	} else {
-		p->regval = vcpu_cp14(vcpu, r->reg);
-	}
-
-	return true;
-}
-
-/* AArch32 debug register mappings
+/*
+ * AArch32 debug register mappings
  *
  * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0]
  * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32]
  *
- * All control registers and watchpoint value registers are mapped to
- * the lower 32 bits of their AArch64 equivalents. We share the trap
- * handlers with the above AArch64 code which checks what mode the
- * system is in.
+ * None of the other registers share their location, so treat them as
+ * if they were 64bit.
  */
-
-static bool trap_xvr(struct kvm_vcpu *vcpu,
-		     struct sys_reg_params *p,
-		     const struct sys_reg_desc *rd)
-{
-	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
-
-	if (p->is_write) {
-		u64 val = *dbg_reg;
-
-		val &= 0xffffffffUL;
-		val |= p->regval << 32;
-		*dbg_reg = val;
-
-		vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
-	} else {
-		p->regval = *dbg_reg >> 32;
-	}
-
-	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
-
-	return true;
-}
-
-#define DBG_BCR_BVR_WCR_WVR(n)						\
-	/* DBGBVRn */							\
-	{ Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n }, 	\
-	/* DBGBCRn */							\
+#define DBG_BCR_BVR_WCR_WVR(n)						\
+	/* DBGBVRn */							\
+	{ AA32(LO), Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n }, \
+	/* DBGBCRn */							\
 	{ Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n },	\
 	/* DBGWVRn */							\
 	{ Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n },	\
 	/* DBGWCRn */							\
 	{ Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n }
 
 #define DBGBXVR(n)							\
-	{ Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_xvr, NULL, n }
+	{ AA32(HI), Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_bvr, NULL, n }
 
 /*
  * Trapped cp14 registers. We generally ignore most of the external
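
The AA32(LO)/AA32(HI) annotations above are what feed get_access_mask() in the shared trap handlers. The sketch below is a plausible shape of that selection, assuming only what the mapping comment states (LO aliases bits [31:0], HI aliases bits [63:32], everything else is treated as 64-bit); the enum and function names are illustrative, not the kernel's actual helper.

#include <stdint.h>

/* Illustrative stand-in for the descriptor annotation: which part of the
 * 64-bit AArch64 register an AArch32 access is allowed to touch. */
enum aa32_map { AA32_DIRECT, AA32_LO, AA32_HI };

/* Derive the access mask and shift from the annotation. */
static void access_mask_of(enum aa32_map map, uint64_t *mask, uint64_t *shift)
{
        switch (map) {
        case AA32_LO:           /* e.g. DBGBVRn  -> DBGBVRn_EL1[31:0]  */
                *mask  = 0x00000000ffffffffULL;
                *shift = 0;
                break;
        case AA32_HI:           /* e.g. DBGBXVRn -> DBGBVRn_EL1[63:32] */
                *mask  = 0xffffffff00000000ULL;
                *shift = 32;
                break;
        default:                /* no aliasing: the whole register */
                *mask  = ~0ULL;
                *shift = 0;
                break;
        }
}

With a mask like this in hand, a single handler such as trap_bvr() can serve the AArch64 register, its AArch32 low-half alias and its AArch32 high-half alias, which is why the dedicated trap_xvr() handler could be removed.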
@@ -1821,9 +1786,9 @@ static const struct sys_reg_desc cp14_regs[] = {
 	{ Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
 	DBG_BCR_BVR_WCR_WVR(1),
 	/* DBGDCCINT */
-	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32, NULL, cp14_DBGDCCINT },
+	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug_regs, NULL, MDCCINT_EL1 },
 	/* DBGDSCRext */
-	{ Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32, NULL, cp14_DBGDSCRext },
+	{ Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug_regs, NULL, MDSCR_EL1 },
 	DBG_BCR_BVR_WCR_WVR(2),
 	/* DBGDTR[RT]Xint */
 	{ Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
@@ -1838,7 +1803,7 @@ static const struct sys_reg_desc cp14_regs[] = {
 	{ Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
 	DBG_BCR_BVR_WCR_WVR(6),
 	/* DBGVCR */
-	{ Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32, NULL, cp14_DBGVCR },
+	{ Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug_regs, NULL, DBGVCR32_EL2 },
 	DBG_BCR_BVR_WCR_WVR(7),
 	DBG_BCR_BVR_WCR_WVR(8),
 	DBG_BCR_BVR_WCR_WVR(9),
...