Commit 8373d25d authored by Sean Christopherson, committed by Paolo Bonzini

KVM: VMX: Add vmx.h to hold VMX definitions

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 609363cf
arch/x86/kvm/vmx/vmx.c
@@ -60,6 +60,7 @@
#include "vmcs.h"
#include "vmcs12.h"
#include "x86.h"
#include "vmx.h"
#define __ex(x) __kvm_handle_fault_on_reboot(x)
#define __ex_clear(x, reg) \
@@ -120,10 +121,6 @@ static u64 __read_mostly host_xss;
bool __read_mostly enable_pml = 1;
module_param_named(pml, enable_pml, bool, S_IRUGO);
#define MSR_TYPE_R 1
#define MSR_TYPE_W 2
#define MSR_TYPE_RW 3
#define MSR_BITMAP_MODE_X2APIC 1
#define MSR_BITMAP_MODE_X2APIC_APICV 2
@@ -345,317 +342,6 @@ static const struct kernel_param_ops vmentry_l1d_flush_ops = {
};
module_param_cb(vmentry_l1d_flush, &vmentry_l1d_flush_ops, NULL, 0644);
enum ept_pointers_status {
EPT_POINTERS_CHECK = 0,
EPT_POINTERS_MATCH = 1,
EPT_POINTERS_MISMATCH = 2
};
struct kvm_vmx {
struct kvm kvm;
unsigned int tss_addr;
bool ept_identity_pagetable_done;
gpa_t ept_identity_map_addr;
enum ept_pointers_status ept_pointers_match;
spinlock_t ept_pointer_lock;
};
struct shared_msr_entry {
unsigned index;
u64 data;
u64 mask;
};
/*
* The nested_vmx structure is part of vcpu_vmx, and holds information we need
* for correct emulation of VMX (i.e., nested VMX) on this vcpu.
*/
struct nested_vmx {
/* Has the level1 guest done vmxon? */
bool vmxon;
gpa_t vmxon_ptr;
bool pml_full;
/* The guest-physical address of the current VMCS L1 keeps for L2 */
gpa_t current_vmptr;
/*
* Cache of the guest's VMCS, existing outside of guest memory.
* Loaded from guest memory during VMPTRLD. Flushed to guest
* memory during VMCLEAR and VMPTRLD.
*/
struct vmcs12 *cached_vmcs12;
/*
* Cache of the guest's shadow VMCS, existing outside of guest
* memory. Loaded from guest memory during VM entry. Flushed
* to guest memory during VM exit.
*/
struct vmcs12 *cached_shadow_vmcs12;
/*
* Indicates if the shadow vmcs or enlightened vmcs must be updated
* with the data held by struct vmcs12.
*/
bool need_vmcs12_sync;
bool dirty_vmcs12;
/*
* vmcs02 has been initialized, i.e. state that is constant for
* vmcs02 has been written to the backing VMCS. Initialization
* is delayed until L1 actually attempts to run a nested VM.
*/
bool vmcs02_initialized;
bool change_vmcs01_virtual_apic_mode;
/*
* Enlightened VMCS has been enabled. It does not mean that L1 has to
* use it. However, VMX features available to L1 will be limited based
* on what the enlightened VMCS supports.
*/
bool enlightened_vmcs_enabled;
/* L2 must run next, and mustn't decide to exit to L1. */
bool nested_run_pending;
struct loaded_vmcs vmcs02;
/*
* Guest pages referred to in the vmcs02 with host-physical
* pointers, so we must keep them pinned while L2 runs.
*/
struct page *apic_access_page;
struct page *virtual_apic_page;
struct page *pi_desc_page;
struct pi_desc *pi_desc;
bool pi_pending;
u16 posted_intr_nv;
struct hrtimer preemption_timer;
bool preemption_timer_expired;
/* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
u64 vmcs01_debugctl;
u64 vmcs01_guest_bndcfgs;
u16 vpid02;
u16 last_vpid;
struct nested_vmx_msrs msrs;
/* SMM related state */
struct {
/* in VMX operation on SMM entry? */
bool vmxon;
/* in guest mode on SMM entry? */
bool guest_mode;
} smm;
gpa_t hv_evmcs_vmptr;
struct page *hv_evmcs_page;
struct hv_enlightened_vmcs *hv_evmcs;
};
#define POSTED_INTR_ON 0
#define POSTED_INTR_SN 1
/* Posted-Interrupt Descriptor */
struct pi_desc {
u32 pir[8]; /* Posted interrupt requested */
union {
struct {
/* bit 256 - Outstanding Notification */
u16 on : 1,
/* bit 257 - Suppress Notification */
sn : 1,
/* bit 271:258 - Reserved */
rsvd_1 : 14;
/* bit 279:272 - Notification Vector */
u8 nv;
/* bit 287:280 - Reserved */
u8 rsvd_2;
/* bit 319:288 - Notification Destination */
u32 ndst;
};
u64 control;
};
u32 rsvd[6];
} __aligned(64);
static bool pi_test_and_set_on(struct pi_desc *pi_desc)
{
return test_and_set_bit(POSTED_INTR_ON,
(unsigned long *)&pi_desc->control);
}
static bool pi_test_and_clear_on(struct pi_desc *pi_desc)
{
return test_and_clear_bit(POSTED_INTR_ON,
(unsigned long *)&pi_desc->control);
}
static int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
{
return test_and_set_bit(vector, (unsigned long *)pi_desc->pir);
}
static inline void pi_clear_sn(struct pi_desc *pi_desc)
{
return clear_bit(POSTED_INTR_SN,
(unsigned long *)&pi_desc->control);
}
static inline void pi_set_sn(struct pi_desc *pi_desc)
{
return set_bit(POSTED_INTR_SN,
(unsigned long *)&pi_desc->control);
}
static inline void pi_clear_on(struct pi_desc *pi_desc)
{
clear_bit(POSTED_INTR_ON,
(unsigned long *)&pi_desc->control);
}
static inline int pi_test_on(struct pi_desc *pi_desc)
{
return test_bit(POSTED_INTR_ON,
(unsigned long *)&pi_desc->control);
}
static inline int pi_test_sn(struct pi_desc *pi_desc)
{
return test_bit(POSTED_INTR_SN,
(unsigned long *)&pi_desc->control);
}
#define NR_AUTOLOAD_MSRS 8
struct vmx_msrs {
unsigned int nr;
struct vmx_msr_entry val[NR_AUTOLOAD_MSRS];
};
struct vcpu_vmx {
struct kvm_vcpu vcpu;
unsigned long host_rsp;
u8 fail;
u8 msr_bitmap_mode;
u32 exit_intr_info;
u32 idt_vectoring_info;
ulong rflags;
struct shared_msr_entry *guest_msrs;
int nmsrs;
int save_nmsrs;
bool guest_msrs_dirty;
unsigned long host_idt_base;
#ifdef CONFIG_X86_64
u64 msr_host_kernel_gs_base;
u64 msr_guest_kernel_gs_base;
#endif
u64 arch_capabilities;
u64 spec_ctrl;
u32 vm_entry_controls_shadow;
u32 vm_exit_controls_shadow;
u32 secondary_exec_control;
/*
* loaded_vmcs points to the VMCS currently used in this vcpu. For a
* non-nested (L1) guest, it always points to vmcs01. For a nested
* guest (L2), it points to a different VMCS. loaded_cpu_state points
* to the VMCS whose state is loaded into the CPU registers that only
* need to be switched when transitioning to/from the kernel; a NULL
* value indicates that host state is loaded.
*/
struct loaded_vmcs vmcs01;
struct loaded_vmcs *loaded_vmcs;
struct loaded_vmcs *loaded_cpu_state;
bool __launched; /* temporary, used in vmx_vcpu_run */
struct msr_autoload {
struct vmx_msrs guest;
struct vmx_msrs host;
} msr_autoload;
struct {
int vm86_active;
ulong save_rflags;
struct kvm_segment segs[8];
} rmode;
struct {
u32 bitmask; /* 4 bits per segment (1 bit per field) */
struct kvm_save_segment {
u16 selector;
unsigned long base;
u32 limit;
u32 ar;
} seg[8];
} segment_cache;
int vpid;
bool emulation_required;
u32 exit_reason;
/* Posted interrupt descriptor */
struct pi_desc pi_desc;
/* Support for a guest hypervisor (nested VMX) */
struct nested_vmx nested;
/* Dynamic PLE window. */
int ple_window;
bool ple_window_dirty;
bool req_immediate_exit;
/* Support for PML */
#define PML_ENTITY_NUM 512
struct page *pml_pg;
/* apic deadline value in host tsc */
u64 hv_deadline_tsc;
u64 current_tsc_ratio;
u32 host_pkru;
unsigned long host_debugctlmsr;
/*
* Only bits masked by msr_ia32_feature_control_valid_bits can be set in
* msr_ia32_feature_control. FEATURE_CONTROL_LOCKED is always included
* in msr_ia32_feature_control_valid_bits.
*/
u64 msr_ia32_feature_control;
u64 msr_ia32_feature_control_valid_bits;
u64 ept_pointer;
};
enum segment_cache_field {
SEG_FIELD_SEL = 0,
SEG_FIELD_BASE = 1,
SEG_FIELD_LIMIT = 2,
SEG_FIELD_AR = 3,
SEG_FIELD_NR = 4
};
static inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm)
{
return container_of(kvm, struct kvm_vmx, kvm);
}
static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
return container_of(vcpu, struct vcpu_vmx, vcpu);
}
static struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu)
{
return &(to_vmx(vcpu)->pi_desc);
}
static u16 shadow_read_only_fields[] = {
#define SHADOW_FIELD_RO(x) x,
#include "vmcs_shadow_fields.h"
@@ -1625,11 +1311,6 @@ static inline void vm_exit_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) & ~val);
}
static void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
{
vmx->segment_cache.bitmask = 0;
}
static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg,
unsigned field)
{
@@ -5152,8 +4833,6 @@ static u8 vmx_msr_bitmap_mode(struct kvm_vcpu *vcpu)
return mode;
}
#define X2APIC_MSR(r) (APIC_BASE_MSR + ((r) >> 4))
static void vmx_update_msr_bitmap_x2apic(unsigned long *msr_bitmap,
u8 mode)
{
@@ -5478,47 +5157,6 @@ static void vmx_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
vmx_update_msr_bitmap(vcpu);
}
static u32 vmx_vmentry_ctrl(void)
{
/* Loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically */
return vmcs_config.vmentry_ctrl &
~(VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VM_ENTRY_LOAD_IA32_EFER);
}
static u32 vmx_vmexit_ctrl(void)
{
/* Loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically */
return vmcs_config.vmexit_ctrl &
~(VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | VM_EXIT_LOAD_IA32_EFER);
}
static u32 vmx_exec_control(struct vcpu_vmx *vmx)
{
u32 exec_control = vmcs_config.cpu_based_exec_ctrl;
if (vmx->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)
exec_control &= ~CPU_BASED_MOV_DR_EXITING;
if (!cpu_need_tpr_shadow(&vmx->vcpu)) {
exec_control &= ~CPU_BASED_TPR_SHADOW;
#ifdef CONFIG_X86_64
exec_control |= CPU_BASED_CR8_STORE_EXITING |
CPU_BASED_CR8_LOAD_EXITING;
#endif
}
if (!enable_ept)
exec_control |= CPU_BASED_CR3_STORE_EXITING |
CPU_BASED_CR3_LOAD_EXITING |
CPU_BASED_INVLPG_EXITING;
if (kvm_mwait_in_guest(vmx->vcpu.kvm))
exec_control &= ~(CPU_BASED_MWAIT_EXITING |
CPU_BASED_MONITOR_EXITING);
if (kvm_hlt_in_guest(vmx->vcpu.kvm))
exec_control &= ~CPU_BASED_HLT_EXITING;
return exec_control;
}
static void vmx_compute_secondary_exec_control(struct vcpu_vmx *vmx)
{
struct kvm_vcpu *vcpu = &vmx->vcpu;
new file: arch/x86/kvm/vmx/vmx.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_H
#define __KVM_X86_VMX_H
#include <linux/kvm_host.h>
#include <asm/kvm.h>
#include "capabilities.h"
#include "vmcs.h"
#define MSR_TYPE_R 1
#define MSR_TYPE_W 2
#define MSR_TYPE_RW 3
#define X2APIC_MSR(r) (APIC_BASE_MSR + ((r) >> 4))
#define NR_AUTOLOAD_MSRS 8
struct vmx_msrs {
unsigned int nr;
struct vmx_msr_entry val[NR_AUTOLOAD_MSRS];
};
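/*
 * Illustrative sketch, not part of this commit: appending an entry to a
 * vmx_msrs autoload list, bounds-checked against NR_AUTOLOAD_MSRS.  The
 * helper name is hypothetical; struct vmx_msr_entry (index/value) comes
 * from asm/vmx.h.
 */
static inline int vmx_msrs_add(struct vmx_msrs *m, u32 index, u64 value)
{
	if (m->nr >= NR_AUTOLOAD_MSRS)
		return -ENOSPC;	/* list full; caller must fall back */
	m->val[m->nr].index = index;
	m->val[m->nr].value = value;
	m->nr++;
	return 0;
}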
struct shared_msr_entry {
unsigned index;
u64 data;
u64 mask;
};
enum segment_cache_field {
SEG_FIELD_SEL = 0,
SEG_FIELD_BASE = 1,
SEG_FIELD_LIMIT = 2,
SEG_FIELD_AR = 3,
SEG_FIELD_NR = 4
};
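/*
 * Illustrative sketch, not part of this commit: how a (segment, field)
 * pair maps to one bit of vcpu_vmx's segment_cache.bitmask.  The helper
 * name is hypothetical; vmx.c's vmx_segment_cache_test_set() derives the
 * mask the same way.
 */
static inline u32 seg_cache_mask(unsigned int seg, enum segment_cache_field field)
{
	/* 4 bits per segment: one bit per cached field (SEG_FIELD_NR == 4) */
	return 1u << (seg * SEG_FIELD_NR + field);
}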
/* Posted-Interrupt Descriptor */
struct pi_desc {
u32 pir[8]; /* Posted interrupt requested */
union {
struct {
/* bit 256 - Outstanding Notification */
u16 on : 1,
/* bit 257 - Suppress Notification */
sn : 1,
/* bit 271:258 - Reserved */
rsvd_1 : 14;
/* bit 279:272 - Notification Vector */
u8 nv;
/* bit 287:280 - Reserved */
u8 rsvd_2;
/* bit 319:288 - Notification Destination */
u32 ndst;
};
u64 control;
};
u32 rsvd[6];
} __aligned(64);
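/*
 * Illustrative sketch, not part of this commit: the CPU consumes the
 * descriptor as a 64-byte block (pir[8] covers bits 0-255, the union is
 * bits 256-319), so a compile-time size check would guard against
 * accidental padding.  The assertion below is an added assumption.
 */
static inline void pi_desc_layout_check(void)
{
	BUILD_BUG_ON(sizeof(struct pi_desc) != 64);
}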
/*
* The nested_vmx structure is part of vcpu_vmx, and holds information we need
* for correct emulation of VMX (i.e., nested VMX) on this vcpu.
*/
struct nested_vmx {
/* Has the level1 guest done vmxon? */
bool vmxon;
gpa_t vmxon_ptr;
bool pml_full;
/* The guest-physical address of the current VMCS L1 keeps for L2 */
gpa_t current_vmptr;
/*
* Cache of the guest's VMCS, existing outside of guest memory.
* Loaded from guest memory during VMPTRLD. Flushed to guest
* memory during VMCLEAR and VMPTRLD.
*/
struct vmcs12 *cached_vmcs12;
/*
* Cache of the guest's shadow VMCS, existing outside of guest
* memory. Loaded from guest memory during VM entry. Flushed
* to guest memory during VM exit.
*/
struct vmcs12 *cached_shadow_vmcs12;
/*
* Indicates if the shadow vmcs or enlightened vmcs must be updated
* with the data held by struct vmcs12.
*/
bool need_vmcs12_sync;
bool dirty_vmcs12;
/*
* vmcs02 has been initialized, i.e. state that is constant for
* vmcs02 has been written to the backing VMCS. Initialization
* is delayed until L1 actually attempts to run a nested VM.
*/
bool vmcs02_initialized;
bool change_vmcs01_virtual_apic_mode;
/*
* Enlightened VMCS has been enabled. It does not mean that L1 has to
* use it. However, VMX features available to L1 will be limited based
* on what the enlightened VMCS supports.
*/
bool enlightened_vmcs_enabled;
/* L2 must run next, and mustn't decide to exit to L1. */
bool nested_run_pending;
struct loaded_vmcs vmcs02;
/*
* Guest pages referred to in the vmcs02 with host-physical
* pointers, so we must keep them pinned while L2 runs.
*/
struct page *apic_access_page;
struct page *virtual_apic_page;
struct page *pi_desc_page;
struct pi_desc *pi_desc;
bool pi_pending;
u16 posted_intr_nv;
struct hrtimer preemption_timer;
bool preemption_timer_expired;
/* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
u64 vmcs01_debugctl;
u64 vmcs01_guest_bndcfgs;
u16 vpid02;
u16 last_vpid;
struct nested_vmx_msrs msrs;
/* SMM related state */
struct {
/* in VMX operation on SMM entry? */
bool vmxon;
/* in guest mode on SMM entry? */
bool guest_mode;
} smm;
gpa_t hv_evmcs_vmptr;
struct page *hv_evmcs_page;
struct hv_enlightened_vmcs *hv_evmcs;
};
struct vcpu_vmx {
struct kvm_vcpu vcpu;
unsigned long host_rsp;
u8 fail;
u8 msr_bitmap_mode;
u32 exit_intr_info;
u32 idt_vectoring_info;
ulong rflags;
struct shared_msr_entry *guest_msrs;
int nmsrs;
int save_nmsrs;
bool guest_msrs_dirty;
unsigned long host_idt_base;
#ifdef CONFIG_X86_64
u64 msr_host_kernel_gs_base;
u64 msr_guest_kernel_gs_base;
#endif
u64 arch_capabilities;
u64 spec_ctrl;
u32 vm_entry_controls_shadow;
u32 vm_exit_controls_shadow;
u32 secondary_exec_control;
/*
* loaded_vmcs points to the VMCS currently used in this vcpu. For a
* non-nested (L1) guest, it always points to vmcs01. For a nested
* guest (L2), it points to a different VMCS. loaded_cpu_state points
* to the VMCS whose state is loaded into the CPU registers that only
* need to be switched when transitioning to/from the kernel; a NULL
* value indicates that host state is loaded.
*/
struct loaded_vmcs vmcs01;
struct loaded_vmcs *loaded_vmcs;
struct loaded_vmcs *loaded_cpu_state;
bool __launched; /* temporary, used in vmx_vcpu_run */
struct msr_autoload {
struct vmx_msrs guest;
struct vmx_msrs host;
} msr_autoload;
struct {
int vm86_active;
ulong save_rflags;
struct kvm_segment segs[8];
} rmode;
struct {
u32 bitmask; /* 4 bits per segment (1 bit per field) */
struct kvm_save_segment {
u16 selector;
unsigned long base;
u32 limit;
u32 ar;
} seg[8];
} segment_cache;
int vpid;
bool emulation_required;
u32 exit_reason;
/* Posted interrupt descriptor */
struct pi_desc pi_desc;
/* Support for a guest hypervisor (nested VMX) */
struct nested_vmx nested;
/* Dynamic PLE window. */
int ple_window;
bool ple_window_dirty;
bool req_immediate_exit;
/* Support for PML */
#define PML_ENTITY_NUM 512
struct page *pml_pg;
/* apic deadline value in host tsc */
u64 hv_deadline_tsc;
u64 current_tsc_ratio;
u32 host_pkru;
unsigned long host_debugctlmsr;
/*
* Only bits masked by msr_ia32_feature_control_valid_bits can be set in
* msr_ia32_feature_control. FEATURE_CONTROL_LOCKED is always included
* in msr_ia32_feature_control_valid_bits.
*/
u64 msr_ia32_feature_control;
u64 msr_ia32_feature_control_valid_bits;
u64 ept_pointer;
};
enum ept_pointers_status {
EPT_POINTERS_CHECK = 0,
EPT_POINTERS_MATCH = 1,
EPT_POINTERS_MISMATCH = 2
};
struct kvm_vmx {
struct kvm kvm;
unsigned int tss_addr;
bool ept_identity_pagetable_done;
gpa_t ept_identity_map_addr;
enum ept_pointers_status ept_pointers_match;
spinlock_t ept_pointer_lock;
};
#define POSTED_INTR_ON 0
#define POSTED_INTR_SN 1
static inline bool pi_test_and_set_on(struct pi_desc *pi_desc)
{
return test_and_set_bit(POSTED_INTR_ON,
(unsigned long *)&pi_desc->control);
}
static inline bool pi_test_and_clear_on(struct pi_desc *pi_desc)
{
return test_and_clear_bit(POSTED_INTR_ON,
(unsigned long *)&pi_desc->control);
}
static inline int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
{
return test_and_set_bit(vector, (unsigned long *)pi_desc->pir);
}
static inline void pi_clear_sn(struct pi_desc *pi_desc)
{
return clear_bit(POSTED_INTR_SN,
(unsigned long *)&pi_desc->control);
}
static inline void pi_set_sn(struct pi_desc *pi_desc)
{
return set_bit(POSTED_INTR_SN,
(unsigned long *)&pi_desc->control);
}
static inline void pi_clear_on(struct pi_desc *pi_desc)
{
clear_bit(POSTED_INTR_ON,
(unsigned long *)&pi_desc->control);
}
static inline int pi_test_on(struct pi_desc *pi_desc)
{
return test_bit(POSTED_INTR_ON,
(unsigned long *)&pi_desc->control);
}
static inline int pi_test_sn(struct pi_desc *pi_desc)
{
return test_bit(POSTED_INTR_SN,
(unsigned long *)&pi_desc->control);
}
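/*
 * Illustrative sketch, not part of this commit: sender-side use of the
 * helpers above, modeled on vmx_deliver_posted_interrupt() in vmx.c.  The
 * notify() callback stands in for sending the notification IPI and is
 * hypothetical.
 */
static inline void pi_post_interrupt(struct pi_desc *pi_desc, int vector,
				     void (*notify)(void))
{
	/* Mark the vector pending; if it was already set, nothing to do. */
	if (pi_test_and_set_pir(vector, pi_desc))
		return;
	/* Only the agent that flips ON from 0 to 1 sends the notification. */
	if (!pi_test_and_set_on(pi_desc))
		notify();
}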
static inline void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
{
vmx->segment_cache.bitmask = 0;
}
static inline u32 vmx_vmentry_ctrl(void)
{
/* Loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically */
return vmcs_config.vmentry_ctrl &
~(VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VM_ENTRY_LOAD_IA32_EFER);
}
static inline u32 vmx_vmexit_ctrl(void)
{
/* Loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically */
return vmcs_config.vmexit_ctrl &
~(VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | VM_EXIT_LOAD_IA32_EFER);
}
u32 vmx_exec_control(struct vcpu_vmx *vmx);
static inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm)
{
return container_of(kvm, struct kvm_vmx, kvm);
}
static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
return container_of(vcpu, struct vcpu_vmx, vcpu);
}
static inline struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu)
{
return &(to_vmx(vcpu)->pi_desc);
}
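/*
 * Illustrative sketch, not part of this commit: the accessors above use
 * container_of() to recover the wrapping structure from a pointer to its
 * embedded member.  Example use, with a hypothetical helper:
 */
static inline void example_mark_ple_dirty(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);	/* generic vcpu -> VMX wrapper */

	vmx->ple_window_dirty = true;
}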
#endif /* __KVM_X86_VMX_H */