Commit ff651cb6 authored by Wincy Van, committed by Paolo Bonzini

KVM: nVMX: Add nested msr load/restore algorithm

Several hypervisors need the MSR auto load/restore feature.
We read MSRs from the VM-entry MSR load area specified by L1
and load them via kvm_set_msr on nested entry.
When a nested exit occurs, we get MSRs via kvm_get_msr and write
them to L1's MSR store area. After that, we read MSRs from the
VM-exit MSR load area and load them via kvm_set_msr.
Signed-off-by: Wincy Van <fanwenyi0529@gmail.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent b1940cd2
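
For reference, the VM-entry/VM-exit MSR areas that this patch walks are guest-physical arrays of 16-byte records; the patch reads them into struct vmx_msr_entry, which the kernel defines (in arch/x86/include/asm/vmx.h) as:

	struct vmx_msr_entry {
		u32 index;	/* MSR number to load or store */
		u32 reserved;	/* must be zero; entries with a non-zero value are rejected */
		u64 value;	/* value to load, or the slot a stored value is written to */
	} __aligned(16);

Note that nested_vmx_msr_check_common() below rejects any index whose bits 31:8 equal 0x000008 (the x2APIC MSR range 0x800-0x8FF), which the SDM disallows in these areas.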
arch/x86/include/uapi/asm/vmx.h
@@ -56,6 +56,7 @@
 #define EXIT_REASON_MSR_READ            31
 #define EXIT_REASON_MSR_WRITE           32
 #define EXIT_REASON_INVALID_STATE       33
+#define EXIT_REASON_MSR_LOAD_FAIL       34
 #define EXIT_REASON_MWAIT_INSTRUCTION   36
 #define EXIT_REASON_MONITOR_INSTRUCTION 39
 #define EXIT_REASON_PAUSE_INSTRUCTION   40
@@ -116,10 +117,14 @@
 	{ EXIT_REASON_APIC_WRITE,            "APIC_WRITE" }, \
 	{ EXIT_REASON_EOI_INDUCED,           "EOI_INDUCED" }, \
 	{ EXIT_REASON_INVALID_STATE,         "INVALID_STATE" }, \
+	{ EXIT_REASON_MSR_LOAD_FAIL,         "MSR_LOAD_FAIL" }, \
 	{ EXIT_REASON_INVD,                  "INVD" }, \
 	{ EXIT_REASON_INVVPID,               "INVVPID" }, \
 	{ EXIT_REASON_INVPCID,               "INVPCID" }, \
 	{ EXIT_REASON_XSAVES,                "XSAVES" }, \
 	{ EXIT_REASON_XRSTORS,               "XRSTORS" }
 
+#define VMX_ABORT_SAVE_GUEST_MSR_FAIL        1
+#define VMX_ABORT_LOAD_HOST_MSR_FAIL         4
+
 #endif /* _UAPIVMX_H */
arch/x86/kvm/vmx.c
@@ -6143,6 +6143,13 @@ static void nested_vmx_failValid(struct kvm_vcpu *vcpu,
 	 */
 }
 
+static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator)
+{
+	/* TODO: not to reset guest simply here. */
+	kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
+	pr_warn("kvm: nested vmx abort, indicator %d\n", indicator);
+}
+
 static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer)
 {
 	struct vcpu_vmx *vmx =
@@ -8286,6 +8293,67 @@ static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu)
 		      ns_to_ktime(preemption_timeout), HRTIMER_MODE_REL);
 }
 
+static inline int nested_vmx_msr_check_common(struct vmx_msr_entry *e)
+{
+	if (e->index >> 8 == 0x8 || e->reserved != 0)
+		return -EINVAL;
+	return 0;
+}
+
+static inline int nested_vmx_load_msr_check(struct vmx_msr_entry *e)
+{
+	if (e->index == MSR_FS_BASE ||
+	    e->index == MSR_GS_BASE ||
+	    nested_vmx_msr_check_common(e))
+		return -EINVAL;
+	return 0;
+}
+
+/*
+ * Load guest's/host's msr at nested entry/exit.
+ * return 0 for success, entry index for failure.
+ */
+static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
+{
+	u32 i;
+	struct vmx_msr_entry e;
+	struct msr_data msr;
+
+	msr.host_initiated = false;
+	for (i = 0; i < count; i++) {
+		kvm_read_guest(vcpu->kvm, gpa + i * sizeof(e), &e, sizeof(e));
+		if (nested_vmx_load_msr_check(&e))
+			goto fail;
+		msr.index = e.index;
+		msr.data = e.value;
+		if (kvm_set_msr(vcpu, &msr))
+			goto fail;
+	}
+	return 0;
+fail:
+	return i + 1;
+}
+
+static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
+{
+	u32 i;
+	struct vmx_msr_entry e;
+
+	for (i = 0; i < count; i++) {
+		kvm_read_guest(vcpu->kvm, gpa + i * sizeof(e),
+			       &e, 2 * sizeof(u32));
+		if (nested_vmx_msr_check_common(&e))
+			return -EINVAL;
+		if (kvm_get_msr(vcpu, e.index, &e.value))
+			return -EINVAL;
+		kvm_write_guest(vcpu->kvm,
+				gpa + i * sizeof(e) +
+					offsetof(struct vmx_msr_entry, value),
+				&e.value, sizeof(e.value));
+	}
+	return 0;
+}
+
 /*
  * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
  * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
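
With these helpers in place, an L1 hypervisor can use the architectural MSR-load mechanism instead of receiving VMXERR_ENTRY_INVALID_CONTROL_FIELD. A minimal L1-side sketch (illustrative only, not part of this patch; it assumes kernel-style vmcs_write accessors and uses MSR_IA32_TSC_AUX purely as an example MSR):

	/* One 16-byte entry per MSR; the area's address must be 16-byte aligned. */
	static struct vmx_msr_entry l2_entry_load[1] __aligned(16) = {
		{ .index = MSR_IA32_TSC_AUX, .reserved = 0, .value = 1 },
	};

	static void setup_l2_msr_autoload(void)
	{
		/* Guest-physical address and entry count of the VM-entry MSR-load area. */
		vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(l2_entry_load));
		vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 1);
	}

On nested VM-entry, L0 now reads each entry from this area and applies it with kvm_set_msr(), as nested_vmx_load_msr() above does.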
@@ -8582,6 +8650,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 	int cpu;
 	struct loaded_vmcs *vmcs02;
 	bool ia32e;
+	u32 msr_entry_idx;
 
 	if (!nested_vmx_check_permission(vcpu) ||
 	    !nested_vmx_check_vmcs12(vcpu))
@@ -8629,15 +8698,6 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 		return 1;
 	}
 
-	if (vmcs12->vm_entry_msr_load_count > 0 ||
-	    vmcs12->vm_exit_msr_load_count > 0 ||
-	    vmcs12->vm_exit_msr_store_count > 0) {
-		pr_warn_ratelimited("%s: VMCS MSR_{LOAD,STORE} unsupported\n",
-				    __func__);
-		nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
-		return 1;
-	}
-
 	if (!vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
 				nested_vmx_true_procbased_ctls_low,
 				nested_vmx_procbased_ctls_high) ||
@@ -8739,10 +8799,21 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 	vmx_segment_cache_clear(vmx);
 
-	vmcs12->launch_state = 1;
-
 	prepare_vmcs02(vcpu, vmcs12);
 
+	msr_entry_idx = nested_vmx_load_msr(vcpu,
+					    vmcs12->vm_entry_msr_load_addr,
+					    vmcs12->vm_entry_msr_load_count);
+	if (msr_entry_idx) {
+		leave_guest_mode(vcpu);
+		vmx_load_vmcs01(vcpu);
+		nested_vmx_entry_failure(vcpu, vmcs12,
+				EXIT_REASON_MSR_LOAD_FAIL, msr_entry_idx);
+		return 1;
+	}
+
+	vmcs12->launch_state = 1;
+
 	if (vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT)
 		return kvm_emulate_halt(vcpu);
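
When nested_vmx_load_msr() fails here, L1 observes an architectural "VM-entry failure due to MSR loading": basic exit reason 34 (EXIT_REASON_MSR_LOAD_FAIL) with the failed-VM-entry bit set, and the exit qualification holding the 1-based index of the offending entry (the i + 1 returned above). A sketch of how L1 might handle it, again assuming kernel-style vmcs_read accessors:

	u32 reason = vmcs_read32(VM_EXIT_REASON);

	if ((reason & VMX_EXIT_REASONS_FAILED_VMENTRY) &&
	    (reason & 0xffff) == EXIT_REASON_MSR_LOAD_FAIL) {
		/* 1-based index of the vmx_msr_entry that could not be loaded. */
		u64 bad_idx = vmcs_readl(EXIT_QUALIFICATION);
		pr_err("L2 entry failed: MSR-load entry %llu\n", bad_idx);
	}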
@@ -9172,6 +9243,10 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
 	kvm_set_dr(vcpu, 7, 0x400);
 	vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
+
+	if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr,
+				vmcs12->vm_exit_msr_load_count))
+		nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
 }
 
 /*
@@ -9193,6 +9268,10 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
 	prepare_vmcs12(vcpu, vmcs12, exit_reason, exit_intr_info,
 		       exit_qualification);
 
+	if (nested_vmx_store_msr(vcpu, vmcs12->vm_exit_msr_store_addr,
+				 vmcs12->vm_exit_msr_store_count))
+		nested_vmx_abort(vcpu, VMX_ABORT_SAVE_GUEST_MSR_FAIL);
+
 	vmx_load_vmcs01(vcpu);
 	if ((exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT)
 ...
arch/x86/kvm/x86.c
@@ -2324,6 +2324,7 @@ int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
 {
 	return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
 }
+EXPORT_SYMBOL_GPL(kvm_get_msr);
 
 static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 {
 ...
virt/kvm/kvm_main.c
@@ -1593,6 +1593,7 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
 	}
 	return 0;
 }
+EXPORT_SYMBOL_GPL(kvm_write_guest);
 
 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 			      gpa_t gpa, unsigned long len)
 ...