Commit 8b474427 authored by Paolo Bonzini

KVM: x86: use kvm_complete_insn_gp in emulating RDMSR/WRMSR

Simplify the four functions that handle {kernel,user} {rd,wr}msr. There
is still some repetition between the two instances of rdmsr, but the
whole business of calling kvm_inject_gp and kvm_skip_emulated_instruction
can be unified nicely.

Because complete_emulated_wrmsr now becomes essentially a call to
kvm_complete_insn_gp, remove complete_emulated_msr.
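
For context, kvm_complete_insn_gp is the existing helper that folds the
"inject #GP on error, otherwise skip the emulated instruction" pattern
into a single call. A rough sketch of its logic around the time of this
patch (simplified from arch/x86/kvm/x86.c):

	int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
	{
		if (err)
			/* Nonzero err: fault the guest with a #GP(0). */
			kvm_inject_gp(vcpu, 0);
		else
			/* Success: advance RIP past the emulated insn. */
			return kvm_skip_emulated_instruction(vcpu);
		return 1;	/* re-enter the guest to deliver the #GP */
	}

Each caller below just computes err and lets the helper choose between
the two exits, which is what lets the four completion paths share code.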
Reviewed-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 9caec4bf
@@ -1634,27 +1634,20 @@ int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data)
 }
 EXPORT_SYMBOL_GPL(kvm_set_msr);
 
-static int complete_emulated_msr(struct kvm_vcpu *vcpu, bool is_read)
+static int complete_emulated_rdmsr(struct kvm_vcpu *vcpu)
 {
-	if (vcpu->run->msr.error) {
-		kvm_inject_gp(vcpu, 0);
-		return 1;
-	} else if (is_read) {
+	int err = vcpu->run->msr.error;
+	if (!err) {
 		kvm_rax_write(vcpu, (u32)vcpu->run->msr.data);
 		kvm_rdx_write(vcpu, vcpu->run->msr.data >> 32);
 	}
 
-	return kvm_skip_emulated_instruction(vcpu);
-}
-
-static int complete_emulated_rdmsr(struct kvm_vcpu *vcpu)
-{
-	return complete_emulated_msr(vcpu, true);
+	return kvm_complete_insn_gp(vcpu, err);
 }
 
 static int complete_emulated_wrmsr(struct kvm_vcpu *vcpu)
 {
-	return complete_emulated_msr(vcpu, false);
+	return kvm_complete_insn_gp(vcpu, vcpu->run->msr.error);
 }
 
 static u64 kvm_msr_reason(int r)
@@ -1717,18 +1710,16 @@ int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu)
 		return 0;
 	}
 
-	/* MSR read failed? Inject a #GP */
-	if (r) {
+	if (!r) {
+		trace_kvm_msr_read(ecx, data);
+		kvm_rax_write(vcpu, data & -1u);
+		kvm_rdx_write(vcpu, (data >> 32) & -1u);
+	} else {
 		trace_kvm_msr_read_ex(ecx);
-		kvm_inject_gp(vcpu, 0);
-		return 1;
 	}
 
-	trace_kvm_msr_read(ecx, data);
-	kvm_rax_write(vcpu, data & -1u);
-	kvm_rdx_write(vcpu, (data >> 32) & -1u);
-	return kvm_skip_emulated_instruction(vcpu);
+	return kvm_complete_insn_gp(vcpu, r);
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_rdmsr);
 
@@ -1749,15 +1740,12 @@ int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu)
 	if (r < 0)
 		return r;
 
-	/* MSR write failed? Inject a #GP */
-	if (r > 0) {
+	if (!r)
+		trace_kvm_msr_write(ecx, data);
+	else
 		trace_kvm_msr_write_ex(ecx, data);
-		kvm_inject_gp(vcpu, 0);
-		return 1;
-	}
 
-	trace_kvm_msr_write(ecx, data);
-	return kvm_skip_emulated_instruction(vcpu);
+	return kvm_complete_insn_gp(vcpu, r);
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_wrmsr);