Commit 3f3393b3 authored by Babu Moger, committed by Paolo Bonzini

KVM: X86: Rename and move the function vmx_handle_memory_failure to x86.c

Handling of kvm_read/write_guest_virt*() errors can be moved to common
code. The same code can be used by both VMX and SVM.
Signed-off-by: Babu Moger <babu.moger@amd.com>
Reviewed-by: Jim Mattson <jmattson@google.com>
Message-Id: <159985254493.11252.6603092560732507607.stgit@bmoger-ubuntu>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 830bd71f
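The rename itself is mechanical; the point of the move is that the helper now lives in common x86 code, so SVM intercept paths can report guest-memory access failures the same way the VMX paths in the diff below do. As a rough illustration only (this handler and its decode step are hypothetical and not part of this commit), an SVM-side caller could look like:

/*
 * Hypothetical SVM intercept handler (illustration only, not part of
 * this commit): read an operand from guest memory and funnel failures
 * through the now-common helper, exactly as the VMX call sites do.
 */
static int svm_example_interception(struct kvm_vcpu *vcpu)
{
	struct x86_exception e;
	gva_t gva = 0;		/* would be decoded from VMCB exit info */
	u64 operand;
	int r;

	r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
	if (r != X86EMUL_CONTINUE)
		/*
		 * Returns 1 after injecting #PF (resume the guest) or 0
		 * after flagging KVM_EXIT_INTERNAL_ERROR (exit to userspace).
		 */
		return kvm_handle_memory_failure(vcpu, r, &e);

	/* ... emulate the intercepted instruction using 'operand' ... */
	return 1;
}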
@@ -4696,7 +4696,7 @@ static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer,
 	r = kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e);
 	if (r != X86EMUL_CONTINUE) {
-		*ret = vmx_handle_memory_failure(vcpu, r, &e);
+		*ret = kvm_handle_memory_failure(vcpu, r, &e);
 		return -EINVAL;
 	}
@@ -5003,7 +5003,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
 		/* _system ok, nested_vmx_check_permission has verified cpl=0 */
 		r = kvm_write_guest_virt_system(vcpu, gva, &value, len, &e);
 		if (r != X86EMUL_CONTINUE)
-			return vmx_handle_memory_failure(vcpu, r, &e);
+			return kvm_handle_memory_failure(vcpu, r, &e);
 	}

 	return nested_vmx_succeed(vcpu);
@@ -5076,7 +5076,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
 			return 1;
 		r = kvm_read_guest_virt(vcpu, gva, &value, len, &e);
 		if (r != X86EMUL_CONTINUE)
-			return vmx_handle_memory_failure(vcpu, r, &e);
+			return kvm_handle_memory_failure(vcpu, r, &e);
 	}

 	field = kvm_register_readl(vcpu, (((instr_info) >> 28) & 0xf));
@@ -5238,7 +5238,7 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
 	r = kvm_write_guest_virt_system(vcpu, gva, (void *)&current_vmptr,
 					sizeof(gpa_t), &e);
 	if (r != X86EMUL_CONTINUE)
-		return vmx_handle_memory_failure(vcpu, r, &e);
+		return kvm_handle_memory_failure(vcpu, r, &e);

 	return nested_vmx_succeed(vcpu);
 }
@@ -5291,7 +5291,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
 		return 1;
 	r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
 	if (r != X86EMUL_CONTINUE)
-		return vmx_handle_memory_failure(vcpu, r, &e);
+		return kvm_handle_memory_failure(vcpu, r, &e);

 	/*
 	 * Nested EPT roots are always held through guest_mmu,
@@ -5373,7 +5373,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
 		return 1;
 	r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
 	if (r != X86EMUL_CONTINUE)
-		return vmx_handle_memory_failure(vcpu, r, &e);
+		return kvm_handle_memory_failure(vcpu, r, &e);

 	if (operand.vpid >> 16)
 		return nested_vmx_fail(vcpu,
...
@@ -1598,33 +1598,6 @@ static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
 	return 1;
 }

-/*
- * Handles kvm_read/write_guest_virt*() result and either injects #PF or returns
- * KVM_EXIT_INTERNAL_ERROR for cases not currently handled by KVM. Return value
- * indicates whether exit to userspace is needed.
- */
-int vmx_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
-			      struct x86_exception *e)
-{
-	if (r == X86EMUL_PROPAGATE_FAULT) {
-		kvm_inject_emulated_page_fault(vcpu, e);
-		return 1;
-	}
-
-	/*
-	 * In case kvm_read/write_guest_virt*() failed with X86EMUL_IO_NEEDED
-	 * while handling a VMX instruction KVM could've handled the request
-	 * correctly by exiting to userspace and performing I/O but there
-	 * doesn't seem to be a real use-case behind such requests, just return
-	 * KVM_EXIT_INTERNAL_ERROR for now.
-	 */
-	vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-	vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
-	vcpu->run->internal.ndata = 0;
-	return 0;
-}
-
 /*
  * Recognizes a pending MTF VM-exit and records the nested state for later
  * delivery.
@@ -5558,7 +5531,7 @@ static int handle_invpcid(struct kvm_vcpu *vcpu)
 	r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
 	if (r != X86EMUL_CONTINUE)
-		return vmx_handle_memory_failure(vcpu, r, &e);
+		return kvm_handle_memory_failure(vcpu, r, &e);

 	if (operand.pcid >> 12 != 0) {
 		kvm_inject_gp(vcpu, 0);
...
@@ -354,8 +354,6 @@ struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr);
 void pt_update_intercept_for_msr(struct vcpu_vmx *vmx);
 void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
 int vmx_find_msr_index(struct vmx_msrs *m, u32 msr);
-int vmx_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
-			      struct x86_exception *e);
 void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu);

 #define POSTED_INTR_ON 0
...
@@ -10765,6 +10765,34 @@ void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_c
 }
 EXPORT_SYMBOL_GPL(kvm_fixup_and_inject_pf_error);

+/*
+ * Handles kvm_read/write_guest_virt*() result and either injects #PF or returns
+ * KVM_EXIT_INTERNAL_ERROR for cases not currently handled by KVM. Return value
+ * indicates whether exit to userspace is needed.
+ */
+int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
+			      struct x86_exception *e)
+{
+	if (r == X86EMUL_PROPAGATE_FAULT) {
+		kvm_inject_emulated_page_fault(vcpu, e);
+		return 1;
+	}
+
+	/*
+	 * In case kvm_read/write_guest_virt*() failed with X86EMUL_IO_NEEDED
+	 * while handling a VMX instruction KVM could've handled the request
+	 * correctly by exiting to userspace and performing I/O but there
+	 * doesn't seem to be a real use-case behind such requests, just return
+	 * KVM_EXIT_INTERNAL_ERROR for now.
+	 */
+	vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+	vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
+	vcpu->run->internal.ndata = 0;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_handle_memory_failure);
+
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
...
@@ -371,6 +371,8 @@ void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu);
 int kvm_spec_ctrl_test_value(u64 value);
 int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
 bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu);
+int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
+			      struct x86_exception *e);

 #define KVM_MSR_RET_INVALID 2
...
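When the helper returns 0, the vcpu thread falls back to userspace: KVM_RUN completes with the exit_reason and suberror set as in the x86.c hunk above. The following is a minimal sketch of how a userspace VMM might recognize that outcome, assuming the standard KVM API and an already set-up vcpu fd with its mmap'ed struct kvm_run (none of this is part of the commit):

#include <linux/kvm.h>
#include <stdio.h>
#include <sys/ioctl.h>

/* vcpu_fd and run come from the usual KVM_CREATE_VCPU + mmap setup. */
static int run_vcpu_once(int vcpu_fd, struct kvm_run *run)
{
	if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
		return -1;

	if (run->exit_reason == KVM_EXIT_INTERNAL_ERROR &&
	    run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) {
		/* The case kvm_handle_memory_failure() reports (ndata == 0). */
		fprintf(stderr, "KVM: unhandled emulation failure\n");
		return -1;
	}
	return 0;
}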