Commit 2961e876 authored by Gleb Natapov, committed by Paolo Bonzini

KVM: VMX: shadow VM_(ENTRY|EXIT)_CONTROLS vmcs field

VM_(ENTRY|EXIT)_CONTROLS vmcs fields are read/written on each guest
entry, but most of the time this can be avoided since the values do not
change. Keep a copy of the fields in memory to avoid unnecessary reads
from the vmcs.
Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 8494bd0e
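
The change boils down to a write-through cache: keep a software copy of a control field, serve reads from that copy, and only issue the comparatively expensive hardware access when the cached value actually changes. Below is a minimal standalone sketch of that pattern, not the KVM code itself; ctl_cache, hw_write32() and the other names are illustrative stand-ins for the real vmcs accessors that appear in the diff.

/*
 * Minimal sketch of the shadow-field pattern introduced by this patch.
 * All names here (ctl_cache, hw_write32, ...) are illustrative stand-ins;
 * the real accessors operate on VM_ENTRY_CONTROLS/VM_EXIT_CONTROLS via
 * vmcs_write32() and are shown in the diff below.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_hw_field;  /* stands in for the vmcs field        */
static unsigned hw_writes;      /* counts the "expensive" accesses     */

static void hw_write32(uint32_t val)    /* stand-in for vmcs_write32() */
{
        fake_hw_field = val;
        hw_writes++;
}

struct ctl_cache {
        uint32_t shadow;        /* last value written to the hardware  */
};

static void ctl_init(struct ctl_cache *c, uint32_t val)
{
        hw_write32(val);        /* unconditional write, refresh shadow */
        c->shadow = val;
}

static void ctl_set(struct ctl_cache *c, uint32_t val)
{
        if (c->shadow != val)   /* skip the hardware access if nothing */
                ctl_init(c, val);       /* actually changed            */
}

static void ctl_setbit(struct ctl_cache *c, uint32_t bit)
{
        ctl_set(c, c->shadow | bit);
}

static void ctl_clearbit(struct ctl_cache *c, uint32_t bit)
{
        ctl_set(c, c->shadow & ~bit);
}

int main(void)
{
        struct ctl_cache c;

        ctl_init(&c, 0x10);     /* 1st hardware write                  */
        ctl_setbit(&c, 0x10);   /* bit already set: no hardware write  */
        ctl_clearbit(&c, 0x01); /* bit already clear: no write either  */
        ctl_setbit(&c, 0x02);   /* value changes: 2nd hardware write   */
        printf("hardware writes: %u\n", hw_writes);     /* prints 2    */
        return 0;
}

Four control updates are requested but only two reach the "hardware", which is the saving the shadow fields buy on the hot guest-entry path.
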
@@ -418,6 +418,8 @@ struct vcpu_vmx {
         u64 msr_host_kernel_gs_base;
         u64 msr_guest_kernel_gs_base;
 #endif
+        u32 vm_entry_controls_shadow;
+        u32 vm_exit_controls_shadow;
         /*
          * loaded_vmcs points to the VMCS currently used in this vcpu. For a
          * non-nested (L1) guest, it always points to vmcs01. For a nested
@@ -1326,6 +1328,62 @@ static void vmcs_set_bits(unsigned long field, u32 mask)
         vmcs_writel(field, vmcs_readl(field) | mask);
 }

+static inline void vm_entry_controls_init(struct vcpu_vmx *vmx, u32 val)
+{
+        vmcs_write32(VM_ENTRY_CONTROLS, val);
+        vmx->vm_entry_controls_shadow = val;
+}
+
+static inline void vm_entry_controls_set(struct vcpu_vmx *vmx, u32 val)
+{
+        if (vmx->vm_entry_controls_shadow != val)
+                vm_entry_controls_init(vmx, val);
+}
+
+static inline u32 vm_entry_controls_get(struct vcpu_vmx *vmx)
+{
+        return vmx->vm_entry_controls_shadow;
+}
+
+static inline void vm_entry_controls_setbit(struct vcpu_vmx *vmx, u32 val)
+{
+        vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) | val);
+}
+
+static inline void vm_entry_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
+{
+        vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) & ~val);
+}
+
+static inline void vm_exit_controls_init(struct vcpu_vmx *vmx, u32 val)
+{
+        vmcs_write32(VM_EXIT_CONTROLS, val);
+        vmx->vm_exit_controls_shadow = val;
+}
+
+static inline void vm_exit_controls_set(struct vcpu_vmx *vmx, u32 val)
+{
+        if (vmx->vm_exit_controls_shadow != val)
+                vm_exit_controls_init(vmx, val);
+}
+
+static inline u32 vm_exit_controls_get(struct vcpu_vmx *vmx)
+{
+        return vmx->vm_exit_controls_shadow;
+}
+
+static inline void vm_exit_controls_setbit(struct vcpu_vmx *vmx, u32 val)
+{
+        vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) | val);
+}
+
+static inline void vm_exit_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
+{
+        vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) & ~val);
+}
+
 static void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
 {
         vmx->segment_cache.bitmask = 0;
@@ -1410,11 +1468,11 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
         vmcs_write32(EXCEPTION_BITMAP, eb);
 }

-static void clear_atomic_switch_msr_special(unsigned long entry,
-                unsigned long exit)
+static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
+                unsigned long entry, unsigned long exit)
 {
-        vmcs_clear_bits(VM_ENTRY_CONTROLS, entry);
-        vmcs_clear_bits(VM_EXIT_CONTROLS, exit);
+        vm_entry_controls_clearbit(vmx, entry);
+        vm_exit_controls_clearbit(vmx, exit);
 }

 static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
@@ -1425,14 +1483,15 @@ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
         switch (msr) {
         case MSR_EFER:
                 if (cpu_has_load_ia32_efer) {
-                        clear_atomic_switch_msr_special(VM_ENTRY_LOAD_IA32_EFER,
+                        clear_atomic_switch_msr_special(vmx,
+                                        VM_ENTRY_LOAD_IA32_EFER,
                                         VM_EXIT_LOAD_IA32_EFER);
                         return;
                 }
                 break;
         case MSR_CORE_PERF_GLOBAL_CTRL:
                 if (cpu_has_load_perf_global_ctrl) {
-                        clear_atomic_switch_msr_special(
+                        clear_atomic_switch_msr_special(vmx,
                                         VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
                                         VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
                         return;
@@ -1453,14 +1512,15 @@ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
         vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
 }

-static void add_atomic_switch_msr_special(unsigned long entry,
-                unsigned long exit, unsigned long guest_val_vmcs,
-                unsigned long host_val_vmcs, u64 guest_val, u64 host_val)
+static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
+                unsigned long entry, unsigned long exit,
+                unsigned long guest_val_vmcs, unsigned long host_val_vmcs,
+                u64 guest_val, u64 host_val)
 {
         vmcs_write64(guest_val_vmcs, guest_val);
         vmcs_write64(host_val_vmcs, host_val);
-        vmcs_set_bits(VM_ENTRY_CONTROLS, entry);
-        vmcs_set_bits(VM_EXIT_CONTROLS, exit);
+        vm_entry_controls_setbit(vmx, entry);
+        vm_exit_controls_setbit(vmx, exit);
 }

 static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
@@ -1472,7 +1532,8 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
         switch (msr) {
         case MSR_EFER:
                 if (cpu_has_load_ia32_efer) {
-                        add_atomic_switch_msr_special(VM_ENTRY_LOAD_IA32_EFER,
+                        add_atomic_switch_msr_special(vmx,
+                                        VM_ENTRY_LOAD_IA32_EFER,
                                         VM_EXIT_LOAD_IA32_EFER,
                                         GUEST_IA32_EFER,
                                         HOST_IA32_EFER,
@@ -1482,7 +1543,7 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
                 break;
         case MSR_CORE_PERF_GLOBAL_CTRL:
                 if (cpu_has_load_perf_global_ctrl) {
-                        add_atomic_switch_msr_special(
+                        add_atomic_switch_msr_special(vmx,
                                         VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
                                         VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
                                         GUEST_IA32_PERF_GLOBAL_CTRL,
@@ -3182,14 +3243,10 @@ static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
         vmx_load_host_state(to_vmx(vcpu));
         vcpu->arch.efer = efer;
         if (efer & EFER_LMA) {
-                vmcs_write32(VM_ENTRY_CONTROLS,
-                             vmcs_read32(VM_ENTRY_CONTROLS) |
-                             VM_ENTRY_IA32E_MODE);
+                vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
                 msr->data = efer;
         } else {
-                vmcs_write32(VM_ENTRY_CONTROLS,
-                             vmcs_read32(VM_ENTRY_CONTROLS) &
-                             ~VM_ENTRY_IA32E_MODE);
+                vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
                 msr->data = efer & ~EFER_LME;
         }
@@ -3217,9 +3274,7 @@ static void enter_lmode(struct kvm_vcpu *vcpu)
 static void exit_lmode(struct kvm_vcpu *vcpu)
 {
-        vmcs_write32(VM_ENTRY_CONTROLS,
-                     vmcs_read32(VM_ENTRY_CONTROLS)
-                     & ~VM_ENTRY_IA32E_MODE);
+        vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
         vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA);
 }
@@ -4346,10 +4401,11 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
                 ++vmx->nmsrs;
         }

-        vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl);
+        vm_exit_controls_init(vmx, vmcs_config.vmexit_ctrl);

         /* 22.2.1, 20.8.1 */
-        vmcs_write32(VM_ENTRY_CONTROLS, vmcs_config.vmentry_ctrl);
+        vm_entry_controls_init(vmx, vmcs_config.vmentry_ctrl);

         vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
         set_cr4_guest_host_mask(vmx);
@@ -7759,12 +7815,12 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
         exit_control = vmcs_config.vmexit_ctrl;
         if (vmcs12->pin_based_vm_exec_control & PIN_BASED_VMX_PREEMPTION_TIMER)
                 exit_control |= VM_EXIT_SAVE_VMX_PREEMPTION_TIMER;
-        vmcs_write32(VM_EXIT_CONTROLS, exit_control);
+        vm_exit_controls_init(vmx, exit_control);

         /* vmcs12's VM_ENTRY_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE are
          * emulated by vmx_set_efer(), below.
          */
-        vmcs_write32(VM_ENTRY_CONTROLS,
+        vm_entry_controls_init(vmx,
                 (vmcs12->vm_entry_controls & ~VM_ENTRY_LOAD_IA32_EFER &
                         ~VM_ENTRY_IA32E_MODE) |
                 (vmcs_config.vmentry_ctrl & ~VM_ENTRY_IA32E_MODE));
@@ -8186,7 +8242,7 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
         vmcs12->vm_entry_controls =
                 (vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) |
-                (vmcs_read32(VM_ENTRY_CONTROLS) & VM_ENTRY_IA32E_MODE);
+                (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE);

         /* TODO: These cannot have changed unless we have MSR bitmaps and
          * the relevant bit asks not to trap the change */
@@ -8390,6 +8446,8 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu)
         vcpu->cpu = cpu;
         put_cpu();

+        vm_entry_controls_init(vmx, vmcs_read32(VM_ENTRY_CONTROLS));
+        vm_exit_controls_init(vmx, vmcs_read32(VM_EXIT_CONTROLS));
         vmx_segment_cache_clear(vmx);

         /* if no vmcs02 cache requested, remove the one we used */