Commit d5cd26bc authored by James Hogan, committed by Paolo Bonzini

MIPS: KVM: Factor writing of translated guest instructions

The code in kvm_mips_dyntrans.c that writes a translated guest instruction
to guest memory depending on the segment is duplicated across these
functions. Additionally, the cache op translation functions assume the
instruction is in the KSEG0/1 segment rather than KSEG2/3, which is
generally true but isn't guaranteed.

Factor that code into a new kvm_mips_trans_replace(), which handles both
KSEG0/1 and KSEG2/3.
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Radim Krčmář <rkrcmar@redhat.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 66ffc50c
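
Before the diff, a note on what the new helper centralizes: every caller must pick a write path based on which guest segment the trapping PC lives in. The short standalone sketch below models only that dispatch; the SKETCH_* constants, the classify() helper and the printed descriptions are illustrative assumptions for this note, not the kernel's definitions (the real code uses KVM_GUEST_KSEGX(), kvm_mips_translate_guest_kseg0_to_hpa(), CKSEG0ADDR() and local_flush_icache_range(), as shown in the hunks below).

/* sketch.c - illustration only; constants are assumptions, not kernel values */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_KSEG0    0x40000000u            /* assumed guest KSEG0/1 base for this sketch */
#define SKETCH_KSEG23   0x60000000u            /* assumed guest KSEG2/3 base for this sketch */
#define SKETCH_KSEGX(a) ((a) & 0xe0000000u)    /* mask off all but the segment bits */

/* Mirror the three-way dispatch that kvm_mips_trans_replace() performs. */
static const char *classify(uint32_t guest_pc)
{
	switch (SKETCH_KSEGX(guest_pc)) {
	case SKETCH_KSEG0:
		return "KSEG0/1: translate to host physical, patch via the unmapped alias";
	case SKETCH_KSEG23:
		return "KSEG2/3: patch through the mapped address with IRQs disabled";
	default:
		return "neither: reject with -EFAULT";
	}
}

int main(void)
{
	printf("0x40001000 -> %s\n", classify(0x40001000));
	printf("0x60001000 -> %s\n", classify(0x60001000));
	printf("0x00001000 -> %s\n", classify(0x00001000));
	return 0;
}
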
@@ -28,21 +28,41 @@
 #define CLEAR_TEMPLATE  0x00000020
 #define SW_TEMPLATE     0xac000000
 
+/**
+ * kvm_mips_trans_replace() - Replace trapping instruction in guest memory.
+ * @vcpu:	Virtual CPU.
+ * @opc:	PC of instruction to replace.
+ * @replace:	Instruction to write
+ */
+static int kvm_mips_trans_replace(struct kvm_vcpu *vcpu, u32 *opc, u32 replace)
+{
+	unsigned long kseg0_opc, flags;
+
+	if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
+		kseg0_opc =
+		    CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
+			       (vcpu, (unsigned long) opc));
+		memcpy((void *)kseg0_opc, (void *)&replace, sizeof(u32));
+		local_flush_icache_range(kseg0_opc, kseg0_opc + 32);
+	} else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
+		local_irq_save(flags);
+		memcpy((void *)opc, (void *)&replace, sizeof(u32));
+		local_flush_icache_range((unsigned long)opc,
+					 (unsigned long)opc + 32);
+		local_irq_restore(flags);
+	} else {
+		kvm_err("%s: Invalid address: %p\n", __func__, opc);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
 int kvm_mips_trans_cache_index(u32 inst, u32 *opc,
 			       struct kvm_vcpu *vcpu)
 {
-	int result = 0;
-	unsigned long kseg0_opc;
-	u32 synci_inst = 0x0;
-
 	/* Replace the CACHE instruction, with a NOP */
-	kseg0_opc =
-	    CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
-		       (vcpu, (unsigned long) opc));
-	memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(u32));
-	local_flush_icache_range(kseg0_opc, kseg0_opc + 32);
-
-	return result;
+	return kvm_mips_trans_replace(vcpu, opc, 0x00000000);
 }
 
 /*
@@ -52,8 +72,6 @@ int kvm_mips_trans_cache_index(u32 inst, u32 *opc,
 int kvm_mips_trans_cache_va(u32 inst, u32 *opc,
 			    struct kvm_vcpu *vcpu)
 {
-	int result = 0;
-	unsigned long kseg0_opc;
 	u32 synci_inst = SYNCI_TEMPLATE, base, offset;
 
 	base = (inst >> 21) & 0x1f;
@@ -61,20 +79,13 @@ int kvm_mips_trans_cache_va(u32 inst, u32 *opc,
 	synci_inst |= (base << 21);
 	synci_inst |= offset;
 
-	kseg0_opc =
-	    CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
-		       (vcpu, (unsigned long) opc));
-	memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(u32));
-	local_flush_icache_range(kseg0_opc, kseg0_opc + 32);
-
-	return result;
+	return kvm_mips_trans_replace(vcpu, opc, synci_inst);
 }
 
 int kvm_mips_trans_mfc0(u32 inst, u32 *opc, struct kvm_vcpu *vcpu)
 {
 	u32 rt, rd, sel;
 	u32 mfc0_inst;
-	unsigned long kseg0_opc, flags;
 
 	rt = (inst >> 16) & 0x1f;
 	rd = (inst >> 11) & 0x1f;
@@ -90,31 +101,13 @@ int kvm_mips_trans_mfc0(u32 inst, u32 *opc, struct kvm_vcpu *vcpu)
 				      cop0.reg[rd][sel]);
 	}
 
-	if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
-		kseg0_opc =
-		    CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
-			       (vcpu, (unsigned long) opc));
-		memcpy((void *)kseg0_opc, (void *)&mfc0_inst, sizeof(u32));
-		local_flush_icache_range(kseg0_opc, kseg0_opc + 32);
-	} else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
-		local_irq_save(flags);
-		memcpy((void *)opc, (void *)&mfc0_inst, sizeof(u32));
-		local_flush_icache_range((unsigned long)opc,
-					 (unsigned long)opc + 32);
-		local_irq_restore(flags);
-	} else {
-		kvm_err("%s: Invalid address: %p\n", __func__, opc);
-		return -EFAULT;
-	}
-
-	return 0;
+	return kvm_mips_trans_replace(vcpu, opc, mfc0_inst);
 }
 
 int kvm_mips_trans_mtc0(u32 inst, u32 *opc, struct kvm_vcpu *vcpu)
 {
 	u32 rt, rd, sel;
 	u32 mtc0_inst = SW_TEMPLATE;
-	unsigned long kseg0_opc, flags;
 
 	rt = (inst >> 16) & 0x1f;
 	rd = (inst >> 11) & 0x1f;
@@ -123,22 +116,5 @@ int kvm_mips_trans_mtc0(u32 inst, u32 *opc, struct kvm_vcpu *vcpu)
 	mtc0_inst |= ((rt & 0x1f) << 16);
 	mtc0_inst |= offsetof(struct kvm_mips_commpage, cop0.reg[rd][sel]);
 
-	if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
-		kseg0_opc =
-		    CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
-			       (vcpu, (unsigned long) opc));
-		memcpy((void *)kseg0_opc, (void *)&mtc0_inst, sizeof(u32));
-		local_flush_icache_range(kseg0_opc, kseg0_opc + 32);
-	} else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
-		local_irq_save(flags);
-		memcpy((void *)opc, (void *)&mtc0_inst, sizeof(u32));
-		local_flush_icache_range((unsigned long)opc,
-					 (unsigned long)opc + 32);
-		local_irq_restore(flags);
-	} else {
-		kvm_err("%s: Invalid address: %p\n", __func__, opc);
-		return -EFAULT;
-	}
-
-	return 0;
+	return kvm_mips_trans_replace(vcpu, opc, mtc0_inst);
 }