Commit 6affcbed authored by Kyle Huey, committed by Paolo Bonzini

KVM: x86: Add kvm_skip_emulated_instruction and use it.

kvm_skip_emulated_instruction calls both
kvm_x86_ops->skip_emulated_instruction and kvm_vcpu_check_singlestep,
skipping the emulated instruction and generating a trap if necessary.

Replacing skip_emulated_instruction calls with
kvm_skip_emulated_instruction is straightforward, except for:

- ICEBP, which is already inside a trap, so avoid triggering another trap.
- Instructions that can trigger exits to userspace, such as the IO insns,
  MOVs to CR8, and HALT. If kvm_skip_emulated_instruction does trigger a
  KVM_GUESTDBG_SINGLESTEP exit, and the handling code for
  IN/OUT/MOV CR8/HALT also triggers an exit to userspace, the latter will
  take precedence. The singlestep will be triggered again on the next
  instruction, which is the current behavior.
- Task switch instructions which would require additional handling (e.g.
  the task switch bit) and are instead left alone.
- Cases where VMLAUNCH/VMRESUME do not proceed to the next instruction,
  which do not trigger singlestep traps as mentioned previously.
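
The second bullet leans on KVM's exit-handler return convention: a handler returns 1 to keep executing in KVM and 0 to exit to userspace, so two steps that can each request an exit are combined with "&&" and the handler's own exit reason wins. Below is a minimal standalone sketch of that convention (plain userspace C, not kernel code; skip_insn() and handle_insn() are hypothetical stand-ins for kvm_skip_emulated_instruction() and a handler such as kvm_fast_pio_out()):

#include <stdio.h>

/* 0 == a KVM_GUESTDBG_SINGLESTEP trap must reach userspace. */
static int skip_insn(int singlestep_pending)
{
        return singlestep_pending ? 0 : 1;
}

/* 0 == the handler itself needs userspace (e.g. emulated IO). */
static int handle_insn(int wants_userspace_exit)
{
        return wants_userspace_exit ? 0 : 1;
}

int main(void)
{
        int ret = skip_insn(1);               /* singlestep trap pending */
        int r = handle_insn(1) && ret;        /* handler also wants an exit */

        /*
         * r == 0: exit to userspace. The handler's exit reason takes
         * precedence; the squashed singlestep fires again on the next
         * instruction, matching the behavior described above.
         */
        printf("continue in KVM? %d\n", r);
        return 0;
}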
Signed-off-by: Kyle Huey <khuey@kylehuey.com>
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
parent eb277562
arch/x86/include/asm/kvm_host.h
@@ -1368,7 +1368,8 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
 bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu);
 extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
 
-void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
+int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu);
+int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
 
 int kvm_is_in_guest(void);
arch/x86/kvm/cpuid.c
@@ -890,7 +890,6 @@ int kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
        kvm_register_write(vcpu, VCPU_REGS_RBX, ebx);
        kvm_register_write(vcpu, VCPU_REGS_RCX, ecx);
        kvm_register_write(vcpu, VCPU_REGS_RDX, edx);
-       kvm_x86_ops->skip_emulated_instruction(vcpu);
-       return 1;
+       return kvm_skip_emulated_instruction(vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
arch/x86/kvm/svm.c
@@ -3151,8 +3151,7 @@ static int skinit_interception(struct vcpu_svm *svm)
 static int wbinvd_interception(struct vcpu_svm *svm)
 {
-       kvm_emulate_wbinvd(&svm->vcpu);
-       return 1;
+       return kvm_emulate_wbinvd(&svm->vcpu);
 }
 
 static int xsetbv_interception(struct vcpu_svm *svm)
@@ -3275,9 +3274,7 @@ static int rdpmc_interception(struct vcpu_svm *svm)
                return emulate_on_interception(svm);
 
        err = kvm_rdpmc(&svm->vcpu);
-       kvm_complete_insn_gp(&svm->vcpu, err);
-       return 1;
+       return kvm_complete_insn_gp(&svm->vcpu, err);
 }
 
 static bool check_selective_cr0_intercepted(struct vcpu_svm *svm,
@@ -3374,9 +3371,7 @@ static int cr_interception(struct vcpu_svm *svm)
                }
                kvm_register_write(&svm->vcpu, reg, val);
        }
-       kvm_complete_insn_gp(&svm->vcpu, err);
-       return 1;
+       return kvm_complete_insn_gp(&svm->vcpu, err);
 }
 
 static int dr_interception(struct vcpu_svm *svm)
arch/x86/kvm/vmx.c
@@ -5556,7 +5556,7 @@ static int handle_triple_fault(struct kvm_vcpu *vcpu)
 static int handle_io(struct kvm_vcpu *vcpu)
 {
        unsigned long exit_qualification;
-       int size, in, string;
+       int size, in, string, ret;
        unsigned port;
 
        exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
@@ -5570,9 +5570,14 @@ static int handle_io(struct kvm_vcpu *vcpu)
        port = exit_qualification >> 16;
        size = (exit_qualification & 7) + 1;
-       skip_emulated_instruction(vcpu);
 
-       return kvm_fast_pio_out(vcpu, size, port);
+       ret = kvm_skip_emulated_instruction(vcpu);
+
+       /*
+        * TODO: we might be squashing a KVM_GUESTDBG_SINGLESTEP-triggered
+        * KVM_EXIT_DEBUG here.
+        */
+       return kvm_fast_pio_out(vcpu, size, port) && ret;
 }
@@ -5670,6 +5675,7 @@ static int handle_cr(struct kvm_vcpu *vcpu)
        int cr;
        int reg;
        int err;
+       int ret;
 
        exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
        cr = exit_qualification & 15;
@@ -5681,25 +5687,27 @@ static int handle_cr(struct kvm_vcpu *vcpu)
                switch (cr) {
                case 0:
                        err = handle_set_cr0(vcpu, val);
-                       kvm_complete_insn_gp(vcpu, err);
-                       return 1;
+                       return kvm_complete_insn_gp(vcpu, err);
                case 3:
                        err = kvm_set_cr3(vcpu, val);
-                       kvm_complete_insn_gp(vcpu, err);
-                       return 1;
+                       return kvm_complete_insn_gp(vcpu, err);
                case 4:
                        err = handle_set_cr4(vcpu, val);
-                       kvm_complete_insn_gp(vcpu, err);
-                       return 1;
+                       return kvm_complete_insn_gp(vcpu, err);
                case 8: {
                        u8 cr8_prev = kvm_get_cr8(vcpu);
                        u8 cr8 = (u8)val;
                        err = kvm_set_cr8(vcpu, cr8);
-                       kvm_complete_insn_gp(vcpu, err);
+                       ret = kvm_complete_insn_gp(vcpu, err);
                        if (lapic_in_kernel(vcpu))
-                               return 1;
+                               return ret;
                        if (cr8_prev <= cr8)
-                               return 1;
+                               return ret;
+                       /*
+                        * TODO: we might be squashing a
+                        * KVM_GUESTDBG_SINGLESTEP-triggered
+                        * KVM_EXIT_DEBUG here.
+                        */
                        vcpu->run->exit_reason = KVM_EXIT_SET_TPR;
                        return 0;
                }
@@ -5709,22 +5717,19 @@ static int handle_cr(struct kvm_vcpu *vcpu)
                handle_clts(vcpu);
                trace_kvm_cr_write(0, kvm_read_cr0(vcpu));
                vmx_fpu_activate(vcpu);
-               skip_emulated_instruction(vcpu);
-               return 1;
+               return kvm_skip_emulated_instruction(vcpu);
        case 1: /*mov from cr*/
                switch (cr) {
                case 3:
                        val = kvm_read_cr3(vcpu);
                        kvm_register_write(vcpu, reg, val);
                        trace_kvm_cr_read(cr, val);
-                       skip_emulated_instruction(vcpu);
-                       return 1;
+                       return kvm_skip_emulated_instruction(vcpu);
                case 8:
                        val = kvm_get_cr8(vcpu);
                        kvm_register_write(vcpu, reg, val);
                        trace_kvm_cr_read(cr, val);
-                       skip_emulated_instruction(vcpu);
-                       return 1;
+                       return kvm_skip_emulated_instruction(vcpu);
                }
                break;
        case 3: /* lmsw */
@@ -5732,8 +5737,7 @@ static int handle_cr(struct kvm_vcpu *vcpu)
                trace_kvm_cr_write(0, (kvm_read_cr0(vcpu) & ~0xful) | val);
                kvm_lmsw(vcpu, val);
-               skip_emulated_instruction(vcpu);
-               return 1;
+               return kvm_skip_emulated_instruction(vcpu);
        default:
                break;
        }
@@ -5804,8 +5808,7 @@ static int handle_dr(struct kvm_vcpu *vcpu)
                if (kvm_set_dr(vcpu, dr, kvm_register_readl(vcpu, reg)))
                        return 1;
 
-       skip_emulated_instruction(vcpu);
-       return 1;
+       return kvm_skip_emulated_instruction(vcpu);
 }
 
 static u64 vmx_get_dr6(struct kvm_vcpu *vcpu)
@@ -5858,8 +5861,7 @@ static int handle_rdmsr(struct kvm_vcpu *vcpu)
        /* FIXME: handling of bits 32:63 of rax, rdx */
        vcpu->arch.regs[VCPU_REGS_RAX] = msr_info.data & -1u;
        vcpu->arch.regs[VCPU_REGS_RDX] = (msr_info.data >> 32) & -1u;
-       skip_emulated_instruction(vcpu);
-       return 1;
+       return kvm_skip_emulated_instruction(vcpu);
 }
 
 static int handle_wrmsr(struct kvm_vcpu *vcpu)
@@ -5879,8 +5881,7 @@ static int handle_wrmsr(struct kvm_vcpu *vcpu)
        }
 
        trace_kvm_msr_write(ecx, data);
-       skip_emulated_instruction(vcpu);
-       return 1;
+       return kvm_skip_emulated_instruction(vcpu);
 }
 
 static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu)
@@ -5924,8 +5925,7 @@ static int handle_invlpg(struct kvm_vcpu *vcpu)
        unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
 
        kvm_mmu_invlpg(vcpu, exit_qualification);
-       skip_emulated_instruction(vcpu);
-       return 1;
+       return kvm_skip_emulated_instruction(vcpu);
 }
 
 static int handle_rdpmc(struct kvm_vcpu *vcpu)
@@ -5933,15 +5933,12 @@ static int handle_rdpmc(struct kvm_vcpu *vcpu)
        int err;
 
        err = kvm_rdpmc(vcpu);
-       kvm_complete_insn_gp(vcpu, err);
-       return 1;
+       return kvm_complete_insn_gp(vcpu, err);
 }
 
 static int handle_wbinvd(struct kvm_vcpu *vcpu)
 {
-       kvm_emulate_wbinvd(vcpu);
-       return 1;
+       return kvm_emulate_wbinvd(vcpu);
 }
 
 static int handle_xsetbv(struct kvm_vcpu *vcpu)
@@ -5950,20 +5947,20 @@ static int handle_xsetbv(struct kvm_vcpu *vcpu)
        u32 index = kvm_register_read(vcpu, VCPU_REGS_RCX);
 
        if (kvm_set_xcr(vcpu, index, new_bv) == 0)
-               skip_emulated_instruction(vcpu);
+               return kvm_skip_emulated_instruction(vcpu);
        return 1;
 }
 
 static int handle_xsaves(struct kvm_vcpu *vcpu)
 {
-       skip_emulated_instruction(vcpu);
+       kvm_skip_emulated_instruction(vcpu);
        WARN(1, "this should never happen\n");
        return 1;
 }
 
 static int handle_xrstors(struct kvm_vcpu *vcpu)
 {
-       skip_emulated_instruction(vcpu);
+       kvm_skip_emulated_instruction(vcpu);
        WARN(1, "this should never happen\n");
        return 1;
 }
@@ -5984,8 +5981,7 @@ static int handle_apic_access(struct kvm_vcpu *vcpu)
                if ((access_type == TYPE_LINEAR_APIC_INST_WRITE) &&
                    (offset == APIC_EOI)) {
                        kvm_lapic_set_eoi(vcpu);
-                       skip_emulated_instruction(vcpu);
-                       return 1;
+                       return kvm_skip_emulated_instruction(vcpu);
                }
        }
        return emulate_instruction(vcpu, 0) == EMULATE_DONE;
@@ -6134,8 +6130,7 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
        gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
        if (!kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) {
                trace_kvm_fast_mmio(gpa);
-               skip_emulated_instruction(vcpu);
-               return 1;
+               return kvm_skip_emulated_instruction(vcpu);
        }
 
        ret = handle_mmio_page_fault(vcpu, gpa, true);
@@ -6508,15 +6503,12 @@ static int handle_pause(struct kvm_vcpu *vcpu)
                grow_ple_window(vcpu);
 
        kvm_vcpu_on_spin(vcpu);
-       skip_emulated_instruction(vcpu);
-       return 1;
+       return kvm_skip_emulated_instruction(vcpu);
 }
 
 static int handle_nop(struct kvm_vcpu *vcpu)
 {
-       skip_emulated_instruction(vcpu);
-       return 1;
+       return kvm_skip_emulated_instruction(vcpu);
 }
 
 static int handle_mwait(struct kvm_vcpu *vcpu)
@@ -6823,8 +6815,7 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
                 */
                if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
                        nested_vmx_failInvalid(vcpu);
-                       skip_emulated_instruction(vcpu);
-                       return 1;
+                       return kvm_skip_emulated_instruction(vcpu);
                }
 
                page = nested_get_page(vcpu, vmptr);
@@ -6832,8 +6823,7 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
                    *(u32 *)kmap(page) != VMCS12_REVISION) {
                        nested_vmx_failInvalid(vcpu);
                        kunmap(page);
-                       skip_emulated_instruction(vcpu);
-                       return 1;
+                       return kvm_skip_emulated_instruction(vcpu);
                }
                kunmap(page);
                vmx->nested.vmxon_ptr = vmptr;
@@ -6842,30 +6832,26 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
                if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
                        nested_vmx_failValid(vcpu,
                                             VMXERR_VMCLEAR_INVALID_ADDRESS);
-                       skip_emulated_instruction(vcpu);
-                       return 1;
+                       return kvm_skip_emulated_instruction(vcpu);
                }
 
                if (vmptr == vmx->nested.vmxon_ptr) {
                        nested_vmx_failValid(vcpu,
                                             VMXERR_VMCLEAR_VMXON_POINTER);
-                       skip_emulated_instruction(vcpu);
-                       return 1;
+                       return kvm_skip_emulated_instruction(vcpu);
                }
                break;
        case EXIT_REASON_VMPTRLD:
                if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
                        nested_vmx_failValid(vcpu,
                                             VMXERR_VMPTRLD_INVALID_ADDRESS);
-                       skip_emulated_instruction(vcpu);
-                       return 1;
+                       return kvm_skip_emulated_instruction(vcpu);
                }
 
                if (vmptr == vmx->nested.vmxon_ptr) {
                        nested_vmx_failValid(vcpu,
                                             VMXERR_VMCLEAR_VMXON_POINTER);
-                       skip_emulated_instruction(vcpu);
-                       return 1;
+                       return kvm_skip_emulated_instruction(vcpu);
                }
                break;
        default:
@@ -6921,8 +6907,7 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
        if (vmx->nested.vmxon) {
                nested_vmx_failValid(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
-               skip_emulated_instruction(vcpu);
-               return 1;
+               return kvm_skip_emulated_instruction(vcpu);
        }
 
        if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES)
@@ -6963,8 +6948,7 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
        vmx->nested.vmxon = true;
 
        nested_vmx_succeed(vcpu);
-       skip_emulated_instruction(vcpu);
-       return 1;
+       return kvm_skip_emulated_instruction(vcpu);
 
 out_shadow_vmcs:
        kfree(vmx->nested.cached_vmcs12);
@@ -7084,8 +7068,7 @@ static int handle_vmoff(struct kvm_vcpu *vcpu)
                return 1;
 
        free_nested(to_vmx(vcpu));
        nested_vmx_succeed(vcpu);
-       skip_emulated_instruction(vcpu);
-       return 1;
+       return kvm_skip_emulated_instruction(vcpu);
 }
 
 /* Emulate the VMCLEAR instruction */
@@ -7125,8 +7108,7 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
        nested_free_vmcs02(vmx, vmptr);
 
        nested_vmx_succeed(vcpu);
-       skip_emulated_instruction(vcpu);
-       return 1;
+       return kvm_skip_emulated_instruction(vcpu);
 }
 
 static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch);
@@ -7340,18 +7322,15 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
        if (!nested_vmx_check_permission(vcpu))
                return 1;
 
-       if (!nested_vmx_check_vmcs12(vcpu)) {
-               skip_emulated_instruction(vcpu);
-               return 1;
-       }
+       if (!nested_vmx_check_vmcs12(vcpu))
+               return kvm_skip_emulated_instruction(vcpu);
 
        /* Decode instruction info and find the field to read */
        field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
        /* Read the field, zero-extended to a u64 field_value */
        if (vmcs12_read_any(vcpu, field, &field_value) < 0) {
                nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
-               skip_emulated_instruction(vcpu);
-               return 1;
+               return kvm_skip_emulated_instruction(vcpu);
        }
        /*
         * Now copy part of this value to register or memory, as requested.
@@ -7371,8 +7350,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
        }
 
        nested_vmx_succeed(vcpu);
-       skip_emulated_instruction(vcpu);
-       return 1;
+       return kvm_skip_emulated_instruction(vcpu);
 }
@@ -7394,10 +7372,8 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
        if (!nested_vmx_check_permission(vcpu))
                return 1;
 
-       if (!nested_vmx_check_vmcs12(vcpu)) {
-               skip_emulated_instruction(vcpu);
-               return 1;
-       }
+       if (!nested_vmx_check_vmcs12(vcpu))
+               return kvm_skip_emulated_instruction(vcpu);
 
        if (vmx_instruction_info & (1u << 10))
                field_value = kvm_register_readl(vcpu,
@@ -7418,19 +7394,16 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
        if (vmcs_field_readonly(field)) {
                nested_vmx_failValid(vcpu,
                        VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
-               skip_emulated_instruction(vcpu);
-               return 1;
+               return kvm_skip_emulated_instruction(vcpu);
        }
 
        if (vmcs12_write_any(vcpu, field, field_value) < 0) {
                nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
-               skip_emulated_instruction(vcpu);
-               return 1;
+               return kvm_skip_emulated_instruction(vcpu);
        }
 
        nested_vmx_succeed(vcpu);
-       skip_emulated_instruction(vcpu);
-       return 1;
+       return kvm_skip_emulated_instruction(vcpu);
 }
 
 /* Emulate the VMPTRLD instruction */
@@ -7451,8 +7424,7 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
                page = nested_get_page(vcpu, vmptr);
                if (page == NULL) {
                        nested_vmx_failInvalid(vcpu);
-                       skip_emulated_instruction(vcpu);
-                       return 1;
+                       return kvm_skip_emulated_instruction(vcpu);
                }
                new_vmcs12 = kmap(page);
                if (new_vmcs12->revision_id != VMCS12_REVISION) {
@@ -7460,8 +7432,7 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
                        nested_release_page_clean(page);
                        nested_vmx_failValid(vcpu,
                                VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
-                       skip_emulated_instruction(vcpu);
-                       return 1;
+                       return kvm_skip_emulated_instruction(vcpu);
                }
 
                nested_release_vmcs12(vmx);
@@ -7485,8 +7456,7 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
        }
 
        nested_vmx_succeed(vcpu);
-       skip_emulated_instruction(vcpu);
-       return 1;
+       return kvm_skip_emulated_instruction(vcpu);
 }
 
 /* Emulate the VMPTRST instruction */
@@ -7511,8 +7481,7 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
                return 1;
        }
        nested_vmx_succeed(vcpu);
-       skip_emulated_instruction(vcpu);
-       return 1;
+       return kvm_skip_emulated_instruction(vcpu);
 }
 
 /* Emulate the INVEPT instruction */
@@ -7550,8 +7519,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
        if (type >= 32 || !(types & (1 << type))) {
                nested_vmx_failValid(vcpu,
                                VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
-               skip_emulated_instruction(vcpu);
-               return 1;
+               return kvm_skip_emulated_instruction(vcpu);
        }
 
        /* According to the Intel VMX instruction reference, the memory
@@ -7582,8 +7550,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
                break;
        }
 
-       skip_emulated_instruction(vcpu);
-       return 1;
+       return kvm_skip_emulated_instruction(vcpu);
 }
 
 static int handle_invvpid(struct kvm_vcpu *vcpu)
@@ -7614,8 +7581,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
        if (type >= 32 || !(types & (1 << type))) {
                nested_vmx_failValid(vcpu,
                        VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
-               skip_emulated_instruction(vcpu);
-               return 1;
+               return kvm_skip_emulated_instruction(vcpu);
        }
 
        /* according to the intel vmx instruction reference, the memory
@@ -7637,23 +7603,20 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
                if (!vpid) {
                        nested_vmx_failValid(vcpu,
                                VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
-                       skip_emulated_instruction(vcpu);
-                       return 1;
+                       return kvm_skip_emulated_instruction(vcpu);
                }
                break;
        case VMX_VPID_EXTENT_ALL_CONTEXT:
                break;
        default:
                WARN_ON_ONCE(1);
-               skip_emulated_instruction(vcpu);
-               return 1;
+               return kvm_skip_emulated_instruction(vcpu);
        }
 
        __vmx_flush_tlb(vcpu, vmx->nested.vpid02);
        nested_vmx_succeed(vcpu);
-       skip_emulated_instruction(vcpu);
-       return 1;
+       return kvm_skip_emulated_instruction(vcpu);
 }
 
 static int handle_pml_full(struct kvm_vcpu *vcpu)
@@ -10194,6 +10157,11 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
        if (!vmcs02)
                return -ENOMEM;
 
+       /*
+        * After this point, the trap flag no longer triggers a singlestep trap
+        * on the vm entry instructions. Don't call
+        * kvm_skip_emulated_instruction.
+        */
        skip_emulated_instruction(vcpu);
        enter_guest_mode(vcpu);
@@ -10238,8 +10206,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
        return 1;
 
 out:
-       skip_emulated_instruction(vcpu);
-       return 1;
+       return kvm_skip_emulated_instruction(vcpu);
 }
 
 /*
arch/x86/kvm/x86.c
@@ -425,12 +425,14 @@ void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
 }
 EXPORT_SYMBOL_GPL(kvm_requeue_exception);
 
-void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
+int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
 {
        if (err)
                kvm_inject_gp(vcpu, 0);
        else
-               kvm_x86_ops->skip_emulated_instruction(vcpu);
+               return kvm_skip_emulated_instruction(vcpu);
+
+       return 1;
 }
 EXPORT_SYMBOL_GPL(kvm_complete_insn_gp);
@@ -4813,8 +4815,8 @@ static int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu)
 int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
 {
-       kvm_x86_ops->skip_emulated_instruction(vcpu);
-       return kvm_emulate_wbinvd_noskip(vcpu);
+       kvm_emulate_wbinvd_noskip(vcpu);
+       return kvm_skip_emulated_instruction(vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd);
@@ -5430,6 +5432,17 @@ static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, unsigned long rflag
        }
 }
 
+int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
+{
+       unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
+       int r = EMULATE_DONE;
+
+       kvm_x86_ops->skip_emulated_instruction(vcpu);
+       kvm_vcpu_check_singlestep(vcpu, rflags, &r);
+       return r == EMULATE_DONE;
+}
+EXPORT_SYMBOL_GPL(kvm_skip_emulated_instruction);
+
 static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r)
 {
        if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) &&
@@ -6007,8 +6020,12 @@ EXPORT_SYMBOL_GPL(kvm_vcpu_halt);
 int kvm_emulate_halt(struct kvm_vcpu *vcpu)
 {
-       kvm_x86_ops->skip_emulated_instruction(vcpu);
-       return kvm_vcpu_halt(vcpu);
+       int ret = kvm_skip_emulated_instruction(vcpu);
+       /*
+        * TODO: we might be squashing a GUESTDBG_SINGLESTEP-triggered
+        * KVM_EXIT_DEBUG here.
+        */
+       return kvm_vcpu_halt(vcpu) && ret;
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_halt);
@@ -6039,9 +6056,9 @@ void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu)
 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
 {
        unsigned long nr, a0, a1, a2, a3, ret;
-       int op_64_bit, r = 1;
+       int op_64_bit, r;
 
-       kvm_x86_ops->skip_emulated_instruction(vcpu);
+       r = kvm_skip_emulated_instruction(vcpu);
 
        if (kvm_hv_hypercall_enabled(vcpu->kvm))
                return kvm_hv_hypercall(vcpu);
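
For context, the KVM_GUESTDBG_SINGLESTEP traps this patch preserves are requested from userspace with the KVM_SET_GUEST_DEBUG ioctl. A hedged sketch of the VMM side, assuming vcpu_fd is an open vCPU file descriptor (error handling omitted):

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

/* Ask KVM to single-step the guest. Each completed instruction,
 * including ones skipped by kvm_skip_emulated_instruction(), then
 * surfaces after KVM_RUN as run->exit_reason == KVM_EXIT_DEBUG,
 * unless the handler's own exit (e.g. KVM_EXIT_IO) takes precedence
 * as described in the commit message. */
static int enable_singlestep(int vcpu_fd)
{
        struct kvm_guest_debug dbg;

        memset(&dbg, 0, sizeof(dbg));
        dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
        return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
}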