Commit dffe042f authored by James Hogan

KVM: MIPS/VZ: Support guest CP0_[X]ContextConfig

Add support for VZ guest CP0_ContextConfig and CP0_XContextConfig
(MIPS64 only) registers, as found on P5600 and P6600 cores. These guest
registers need initialising, context switching, and exposing via the KVM
ioctl API when they are present.

Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Radim Krčmář" <rkrcmar@redhat.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
Cc: linux-doc@vger.kernel.org
parent edc89260
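
As the description notes, the new state is exposed to userspace through the one-reg ioctl API. For orientation, here is a minimal userspace sketch (not part of this commit) of how the registers would be accessed once the patch is applied. The helper names and the MIPS_CP0_ONE_REG() macro are invented for this example; only struct kvm_one_reg, the KVM_{GET,SET}_ONE_REG ioctls and the MIPS CP0 id encoding from Documentation/virtual/kvm/api.txt are kernel UAPI. VM/VCPU creation is elided.

    /* Illustrative only: access guest CP0_ContextConfig / CP0_XContextConfig
     * from userspace on an existing VCPU fd.  EINVAL from the ioctl means the
     * guest context does not implement the register (mirroring the -EINVAL
     * paths added in vz.c below). */
    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* MIPS CP0 one-reg id: KVM_REG_MIPS | size | 0x10000 | (reg << 3) | sel */
    #define MIPS_CP0_ONE_REG(reg, sel, size) \
            (KVM_REG_MIPS | (size) | 0x10000ULL | ((reg) << 3) | (sel))

    #define REG_CP0_CONTEXTCONFIG   MIPS_CP0_ONE_REG(4, 1, KVM_REG_SIZE_U32)
    #define REG_CP0_XCONTEXTCONFIG  MIPS_CP0_ONE_REG(4, 3, KVM_REG_SIZE_U64)

    /* Read guest CP0_ContextConfig (32-bit register, per api.txt). */
    static int get_contextconfig(int vcpu_fd, uint32_t *val)
    {
            struct kvm_one_reg reg = {
                    .id   = REG_CP0_CONTEXTCONFIG,
                    .addr = (uintptr_t)val,
            };

            return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
    }

    /* Write guest CP0_XContextConfig (64-bit register, MIPS64 guests only). */
    static int set_xcontextconfig(int vcpu_fd, uint64_t val)
    {
            struct kvm_one_reg reg = {
                    .id   = REG_CP0_XCONTEXTCONFIG,
                    .addr = (uintptr_t)&val,
            };

            return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
    }
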
@@ -2073,7 +2073,9 @@ registers, find a list below:
   MIPS  | KVM_REG_MIPS_CP0_ENTRYLO0     | 64
   MIPS  | KVM_REG_MIPS_CP0_ENTRYLO1     | 64
   MIPS  | KVM_REG_MIPS_CP0_CONTEXT      | 64
+  MIPS  | KVM_REG_MIPS_CP0_CONTEXTCONFIG| 32
   MIPS  | KVM_REG_MIPS_CP0_USERLOCAL    | 64
+  MIPS  | KVM_REG_MIPS_CP0_XCONTEXTCONFIG| 64
   MIPS  | KVM_REG_MIPS_CP0_PAGEMASK     | 32
   MIPS  | KVM_REG_MIPS_CP0_PAGEGRAIN    | 32
   MIPS  | KVM_REG_MIPS_CP0_WIRED        | 32
...
@@ -34,7 +34,9 @@
 #define KVM_REG_MIPS_CP0_ENTRYLO0 MIPS_CP0_64(2, 0)
 #define KVM_REG_MIPS_CP0_ENTRYLO1 MIPS_CP0_64(3, 0)
 #define KVM_REG_MIPS_CP0_CONTEXT MIPS_CP0_64(4, 0)
+#define KVM_REG_MIPS_CP0_CONTEXTCONFIG MIPS_CP0_32(4, 1)
 #define KVM_REG_MIPS_CP0_USERLOCAL MIPS_CP0_64(4, 2)
+#define KVM_REG_MIPS_CP0_XCONTEXTCONFIG MIPS_CP0_64(4, 3)
 #define KVM_REG_MIPS_CP0_PAGEMASK MIPS_CP0_32(5, 0)
 #define KVM_REG_MIPS_CP0_PAGEGRAIN MIPS_CP0_32(5, 1)
 #define KVM_REG_MIPS_CP0_WIRED MIPS_CP0_32(6, 0)
@@ -665,7 +667,9 @@ __BUILD_KVM_RW_HW(index, 32, MIPS_CP0_TLB_INDEX, 0)
 __BUILD_KVM_RW_HW(entrylo0, l, MIPS_CP0_TLB_LO0, 0)
 __BUILD_KVM_RW_HW(entrylo1, l, MIPS_CP0_TLB_LO1, 0)
 __BUILD_KVM_RW_HW(context, l, MIPS_CP0_TLB_CONTEXT, 0)
+__BUILD_KVM_RW_HW(contextconfig, 32, MIPS_CP0_TLB_CONTEXT, 1)
 __BUILD_KVM_RW_HW(userlocal, l, MIPS_CP0_TLB_CONTEXT, 2)
+__BUILD_KVM_RW_HW(xcontextconfig, l, MIPS_CP0_TLB_CONTEXT, 3)
 __BUILD_KVM_RW_HW(pagemask, l, MIPS_CP0_TLB_PG_MASK, 0)
 __BUILD_KVM_RW_HW(pagegrain, 32, MIPS_CP0_TLB_PG_MASK, 1)
 __BUILD_KVM_RW_HW(wired, 32, MIPS_CP0_TLB_WIRED, 0)
...
@@ -131,7 +131,7 @@ static inline unsigned int kvm_vz_config5_guest_wrmask(struct kvm_vcpu *vcpu)
  * Config: M, [MT]
  * Config1: M, [MMUSize-1, C2, MD, PC, WR, CA], FP
  * Config2: M
- * Config3: M, MSAP, [BPG], ULRI, [DSP2P, DSPP, CTXTC, ITL, LPA, VEIC,
+ * Config3: M, MSAP, [BPG], ULRI, [DSP2P, DSPP], CTXTC, [ITL, LPA, VEIC,
  *          VInt, SP, CDMM, MT, SM, TL]
  * Config4: M, [VTLBSizeExt, MMUSizeExt]
  * Config5: [MRP]
@@ -161,7 +161,7 @@ static inline unsigned int kvm_vz_config2_user_wrmask(struct kvm_vcpu *vcpu)
 static inline unsigned int kvm_vz_config3_user_wrmask(struct kvm_vcpu *vcpu)
 {
         unsigned int mask = kvm_vz_config3_guest_wrmask(vcpu) | MIPS_CONF_M |
-                        MIPS_CONF3_ULRI;
+                        MIPS_CONF3_ULRI | MIPS_CONF3_CTXTC;
 
         /* Permit MSA to be present if MSA is supported */
         if (kvm_mips_guest_can_have_msa(&vcpu->arch))
@@ -1205,6 +1205,13 @@ static u64 kvm_vz_get_one_regs[] = {
         KVM_REG_MIPS_COUNT_HZ,
 };
 
+static u64 kvm_vz_get_one_regs_contextconfig[] = {
+        KVM_REG_MIPS_CP0_CONTEXTCONFIG,
+#ifdef CONFIG_64BIT
+        KVM_REG_MIPS_CP0_XCONTEXTCONFIG,
+#endif
+};
+
 static u64 kvm_vz_get_one_regs_kscratch[] = {
         KVM_REG_MIPS_CP0_KSCRATCH1,
         KVM_REG_MIPS_CP0_KSCRATCH2,
@@ -1225,6 +1232,8 @@ static unsigned long kvm_vz_num_regs(struct kvm_vcpu *vcpu)
                 ++ret;
         if (cpu_guest_has_badinstrp)
                 ++ret;
+        if (cpu_guest_has_contextconfig)
+                ret += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig);
         ret += __arch_hweight8(cpu_data[0].guest.kscratch_mask);
 
         return ret;
@@ -1258,6 +1267,12 @@ static int kvm_vz_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
                         return -EFAULT;
                 ++indices;
         }
+        if (cpu_guest_has_contextconfig) {
+                if (copy_to_user(indices, kvm_vz_get_one_regs_contextconfig,
+                                 sizeof(kvm_vz_get_one_regs_contextconfig)))
+                        return -EFAULT;
+                indices += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig);
+        }
         for (i = 0; i < 6; ++i) {
                 if (!cpu_guest_has_kscr(i + 2))
                         continue;
@@ -1323,11 +1338,23 @@ static int kvm_vz_get_one_reg(struct kvm_vcpu *vcpu,
         case KVM_REG_MIPS_CP0_CONTEXT:
                 *v = (long)read_gc0_context();
                 break;
+        case KVM_REG_MIPS_CP0_CONTEXTCONFIG:
+                if (!cpu_guest_has_contextconfig)
+                        return -EINVAL;
+                *v = read_gc0_contextconfig();
+                break;
         case KVM_REG_MIPS_CP0_USERLOCAL:
                 if (!cpu_guest_has_userlocal)
                         return -EINVAL;
                 *v = read_gc0_userlocal();
                 break;
+#ifdef CONFIG_64BIT
+        case KVM_REG_MIPS_CP0_XCONTEXTCONFIG:
+                if (!cpu_guest_has_contextconfig)
+                        return -EINVAL;
+                *v = read_gc0_xcontextconfig();
+                break;
+#endif
         case KVM_REG_MIPS_CP0_PAGEMASK:
                 *v = (long)read_gc0_pagemask();
                 break;
@@ -1478,11 +1505,23 @@ static int kvm_vz_set_one_reg(struct kvm_vcpu *vcpu,
         case KVM_REG_MIPS_CP0_CONTEXT:
                 write_gc0_context(v);
                 break;
+        case KVM_REG_MIPS_CP0_CONTEXTCONFIG:
+                if (!cpu_guest_has_contextconfig)
+                        return -EINVAL;
+                write_gc0_contextconfig(v);
+                break;
         case KVM_REG_MIPS_CP0_USERLOCAL:
                 if (!cpu_guest_has_userlocal)
                         return -EINVAL;
                 write_gc0_userlocal(v);
                 break;
+#ifdef CONFIG_64BIT
+        case KVM_REG_MIPS_CP0_XCONTEXTCONFIG:
+                if (!cpu_guest_has_contextconfig)
+                        return -EINVAL;
+                write_gc0_xcontextconfig(v);
+                break;
+#endif
         case KVM_REG_MIPS_CP0_PAGEMASK:
                 write_gc0_pagemask(v);
                 break;
@@ -1874,8 +1913,12 @@ static int kvm_vz_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
         kvm_restore_gc0_entrylo0(cop0);
         kvm_restore_gc0_entrylo1(cop0);
         kvm_restore_gc0_context(cop0);
+        if (cpu_guest_has_contextconfig)
+                kvm_restore_gc0_contextconfig(cop0);
 #ifdef CONFIG_64BIT
         kvm_restore_gc0_xcontext(cop0);
+        if (cpu_guest_has_contextconfig)
+                kvm_restore_gc0_xcontextconfig(cop0);
 #endif
         kvm_restore_gc0_pagemask(cop0);
         kvm_restore_gc0_pagegrain(cop0);
@@ -1933,8 +1976,12 @@ static int kvm_vz_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
         kvm_save_gc0_entrylo0(cop0);
         kvm_save_gc0_entrylo1(cop0);
         kvm_save_gc0_context(cop0);
+        if (cpu_guest_has_contextconfig)
+                kvm_save_gc0_contextconfig(cop0);
 #ifdef CONFIG_64BIT
         kvm_save_gc0_xcontext(cop0);
+        if (cpu_guest_has_contextconfig)
+                kvm_save_gc0_xcontextconfig(cop0);
 #endif
         kvm_save_gc0_pagemask(cop0);
         kvm_save_gc0_pagegrain(cop0);
@@ -2298,6 +2345,17 @@ static int kvm_vz_vcpu_setup(struct kvm_vcpu *vcpu)
                 kvm_clear_sw_gc0_config5(cop0, MIPS_CONF5_MRP);
         }
 
+        if (cpu_guest_has_contextconfig) {
+                /* ContextConfig */
+                kvm_write_sw_gc0_contextconfig(cop0, 0x007ffff0);
+#ifdef CONFIG_64BIT
+                /* XContextConfig */
+                /* bits SEGBITS-13+3:4 set */
+                kvm_write_sw_gc0_xcontextconfig(cop0,
+                                        ((1ull << (cpu_vmbits - 13)) - 1) << 4);
+#endif
+        }
+
         /* start with no pending virtual guest interrupts */
         if (cpu_has_guestctl2)
                 cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL] = 0;
...
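
The reset values written in the last hunk can be sanity-checked in isolation. Below is a small standalone program (not kernel code) that evaluates the XContextConfig expression exactly as in the patch for a sample SEGBITS; cpu_vmbits in the kernel corresponds to SEGBITS, and 40 is just an illustrative value.

    #include <stdio.h>

    int main(void)
    {
            unsigned int segbits = 40;      /* sample value; kernel uses cpu_vmbits */
            unsigned long long xcc = ((1ull << (segbits - 13)) - 1) << 4;

            /* bits SEGBITS-13+3:4 set -> 0x7ffffff0 for SEGBITS == 40 */
            printf("XContextConfig reset: %#llx\n", xcc);

            /* The 32-bit ContextConfig reset 0x007ffff0 is the analogous
             * pattern: bits 22:4 set, matching the legacy Context BadVPN2
             * field position. */
            return 0;
    }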