Commit 707d92fa authored by Rusty Russell, committed by Avi Kivity

KVM: Trivial: Use standard CR0 flags macros from asm/cpu-features.h

The kernel now has asm/cpu-features.h: use those macros instead of
inventing our own.

Also spell out definition of CR0_RESEVED_BITS (no code change) and fix typo.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Avi Kivity <avi@qumranet.com>
parent 9a2b85c6
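
For orientation: the X86_CR0_* macros adopted below name the architectural CR0 bit positions that the removed CR0_*_MASK macros had encoded by hand. A reference sketch of those values (per the Intel SDM; illustrative, not a verbatim copy of the kernel header):

/* Architectural CR0 flag positions (Intel SDM, Vol. 3) */
#define X86_CR0_PE (1UL << 0)   /* Protection Enable */
#define X86_CR0_MP (1UL << 1)   /* Monitor Coprocessor */
#define X86_CR0_EM (1UL << 2)   /* Emulation (no x87) */
#define X86_CR0_TS (1UL << 3)   /* Task Switched */
#define X86_CR0_ET (1UL << 4)   /* Extension Type */
#define X86_CR0_NE (1UL << 5)   /* Numeric Error */
#define X86_CR0_WP (1UL << 16)  /* Write Protect */
#define X86_CR0_AM (1UL << 18)  /* Alignment Mask */
#define X86_CR0_NW (1UL << 29)  /* Not Write-through */
#define X86_CR0_CD (1UL << 30)  /* Cache Disable */
#define X86_CR0_PG (1UL << 31)  /* Paging */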
@@ -19,15 +19,6 @@
 #include <linux/kvm.h>
 #include <linux/kvm_para.h>
 
-#define CR0_PE_MASK (1ULL << 0)
-#define CR0_MP_MASK (1ULL << 1)
-#define CR0_TS_MASK (1ULL << 3)
-#define CR0_NE_MASK (1ULL << 5)
-#define CR0_WP_MASK (1ULL << 16)
-#define CR0_NW_MASK (1ULL << 29)
-#define CR0_CD_MASK (1ULL << 30)
-#define CR0_PG_MASK (1ULL << 31)
-
 #define CR3_WPT_MASK (1ULL << 3)
 #define CR3_PCD_MASK (1ULL << 4)
@@ -42,11 +33,11 @@
 #define CR4_VMXE_MASK (1ULL << 13)
 
 #define KVM_GUEST_CR0_MASK \
-	(CR0_PG_MASK | CR0_PE_MASK | CR0_WP_MASK | CR0_NE_MASK \
-	 | CR0_NW_MASK | CR0_CD_MASK)
+	(X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE \
+	 | X86_CR0_NW | X86_CR0_CD)
 #define KVM_VM_CR0_ALWAYS_ON \
-	(CR0_PG_MASK | CR0_PE_MASK | CR0_WP_MASK | CR0_NE_MASK | CR0_TS_MASK \
-	 | CR0_MP_MASK)
+	(X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE | X86_CR0_TS \
+	 | X86_CR0_MP)
 #define KVM_GUEST_CR4_MASK \
 	(CR4_PSE_MASK | CR4_PAE_MASK | CR4_PGE_MASK | CR4_VMXE_MASK | CR4_VME_MASK)
 #define KVM_PMODE_VM_CR4_ALWAYS_ON (CR4_VMXE_MASK | CR4_PAE_MASK)
@@ -667,7 +658,7 @@ static inline int is_pse(struct kvm_vcpu *vcpu)
 
 static inline int is_paging(struct kvm_vcpu *vcpu)
 {
-	return vcpu->cr0 & CR0_PG_MASK;
+	return vcpu->cr0 & X86_CR0_PG;
 }
 
 static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot *slot)
...
@@ -82,7 +82,10 @@ static struct dentry *debugfs_dir;
 
 #define MAX_IO_MSRS 256
 
-#define CR0_RESEVED_BITS 0xffffffff1ffaffc0ULL
+#define CR0_RESERVED_BITS \
+	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
+			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
+			  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
 #define LMSW_GUEST_MASK 0x0eULL
 #define CR4_RESEVED_BITS (~((1ULL << 11) - 1))
 #define CR8_RESEVED_BITS (~0x0fULL)
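
The "no code change" claim for the spelled-out CR0_RESERVED_BITS checks out on 64-bit: the eleven defined flags OR to 0xe005003f, and the complement of that is exactly the old literal. A minimal standalone verification (hypothetical test program, not part of the patch; assumes the architectural bit positions listed above):

/* Sanity check: symbolic CR0_RESERVED_BITS == old magic constant. */
#include <assert.h>
#include <stdint.h>

#define CR0_KNOWN_BITS /* PE MP EM TS ET NE WP AM NW CD PG */ \
	((1UL << 0) | (1UL << 1) | (1UL << 2) | (1UL << 3) | (1UL << 4) \
	 | (1UL << 5) | (1UL << 16) | (1UL << 18) | (1UL << 29) \
	 | (1UL << 30) | (1UL << 31))

int main(void)
{
	assert(CR0_KNOWN_BITS == 0xe005003fUL);
	/* Complement in 64 bits reproduces the old CR0_RESEVED_BITS value. */
	assert(~(uint64_t)CR0_KNOWN_BITS == 0xffffffff1ffaffc0ULL);
	return 0;
}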
@@ -466,27 +469,27 @@ static int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
 
 void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
-	if (cr0 & CR0_RESEVED_BITS) {
+	if (cr0 & CR0_RESERVED_BITS) {
 		printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
 		       cr0, vcpu->cr0);
 		inject_gp(vcpu);
 		return;
 	}
 
-	if ((cr0 & CR0_NW_MASK) && !(cr0 & CR0_CD_MASK)) {
+	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
 		printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
 		inject_gp(vcpu);
 		return;
 	}
 
-	if ((cr0 & CR0_PG_MASK) && !(cr0 & CR0_PE_MASK)) {
+	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
 		printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
 		       "and a clear PE flag\n");
 		inject_gp(vcpu);
 		return;
 	}
 
-	if (!is_paging(vcpu) && (cr0 & CR0_PG_MASK)) {
+	if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
 #ifdef CONFIG_X86_64
 		if ((vcpu->shadow_efer & EFER_LME)) {
 			int cs_db, cs_l;
@@ -1158,7 +1161,7 @@ int emulate_clts(struct kvm_vcpu *vcpu)
 {
 	unsigned long cr0;
 
-	cr0 = vcpu->cr0 & ~CR0_TS_MASK;
+	cr0 = vcpu->cr0 & ~X86_CR0_TS;
 	kvm_arch_ops->set_cr0(vcpu, cr0);
 	return X86EMUL_CONTINUE;
 }
...
@@ -158,7 +158,7 @@ static struct kmem_cache *mmu_page_header_cache;
 
 static int is_write_protection(struct kvm_vcpu *vcpu)
 {
-	return vcpu->cr0 & CR0_WP_MASK;
+	return vcpu->cr0 & X86_CR0_WP;
 }
 
 static int is_cpuid_PSE36(void)
...
@@ -99,7 +99,7 @@ static unsigned get_addr_size(struct kvm_vcpu *vcpu)
 	struct vmcb_save_area *sa = &vcpu->svm->vmcb->save;
 	u16 cs_attrib;
 
-	if (!(sa->cr0 & CR0_PE_MASK) || (sa->rflags & X86_EFLAGS_VM))
+	if (!(sa->cr0 & X86_CR0_PE) || (sa->rflags & X86_EFLAGS_VM))
 		return 2;
 
 	cs_attrib = sa->cs.attrib;
@@ -563,7 +563,7 @@ static void init_vmcb(struct vmcb *vmcb)
 	 * cr0 val on cpu init should be 0x60000010, we enable cpu
 	 * cache by default. the orderly way is to enable cache in bios.
 	 */
-	save->cr0 = 0x00000010 | CR0_PG_MASK | CR0_WP_MASK;
+	save->cr0 = 0x00000010 | X86_CR0_PG | X86_CR0_WP;
 	save->cr4 = CR4_PAE_MASK;
 	/* rdx = ?? */
 }
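
An aside on the comment in this hunk: the CR0 reset value 0x60000010 decomposes as X86_CR0_CD | X86_CR0_NW | X86_CR0_ET, so seeding save->cr0 from 0x00000010 (ET alone) leaves CD and NW clear, which is what "enable cpu cache by default" means here. A quick illustrative check (standalone sketch, assuming the SDM bit positions):

/* 0x60000010 == CD | NW | ET: the reset value, caching disabled. */
#include <assert.h>

int main(void)
{
	unsigned long et = 1UL << 4, nw = 1UL << 29, cd = 1UL << 30;

	assert((cd | nw | et) == 0x60000010UL);
	return 0;
}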
@@ -756,25 +756,25 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
 #ifdef CONFIG_X86_64
 	if (vcpu->shadow_efer & KVM_EFER_LME) {
-		if (!is_paging(vcpu) && (cr0 & CR0_PG_MASK)) {
+		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
 			vcpu->shadow_efer |= KVM_EFER_LMA;
 			vcpu->svm->vmcb->save.efer |= KVM_EFER_LMA | KVM_EFER_LME;
 		}
 
-		if (is_paging(vcpu) && !(cr0 & CR0_PG_MASK) ) {
+		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG) ) {
 			vcpu->shadow_efer &= ~KVM_EFER_LMA;
 			vcpu->svm->vmcb->save.efer &= ~(KVM_EFER_LMA | KVM_EFER_LME);
 		}
 	}
 #endif
 
-	if ((vcpu->cr0 & CR0_TS_MASK) && !(cr0 & CR0_TS_MASK)) {
+	if ((vcpu->cr0 & X86_CR0_TS) && !(cr0 & X86_CR0_TS)) {
 		vcpu->svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
 		vcpu->fpu_active = 1;
 	}
 
 	vcpu->cr0 = cr0;
-	cr0 |= CR0_PG_MASK | CR0_WP_MASK;
-	cr0 &= ~(CR0_CD_MASK | CR0_NW_MASK);
+	cr0 |= X86_CR0_PG | X86_CR0_WP;
+	cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
 	vcpu->svm->vmcb->save.cr0 = cr0;
 }
@@ -945,8 +945,8 @@ static int pf_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 static int nm_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
 	vcpu->svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
-	if (!(vcpu->cr0 & CR0_TS_MASK))
-		vcpu->svm->vmcb->save.cr0 &= ~CR0_TS_MASK;
+	if (!(vcpu->cr0 & X86_CR0_TS))
+		vcpu->svm->vmcb->save.cr0 &= ~X86_CR0_TS;
 	vcpu->fpu_active = 1;
 	return 1;
@@ -1702,7 +1702,7 @@ static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
 
 	if (vcpu->fpu_active) {
 		vcpu->svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
-		vcpu->svm->vmcb->save.cr0 |= CR0_TS_MASK;
+		vcpu->svm->vmcb->save.cr0 |= X86_CR0_TS;
 		vcpu->fpu_active = 0;
 	}
 }
...
@@ -436,9 +436,9 @@ static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
 	if (vcpu->fpu_active)
 		return;
 	vcpu->fpu_active = 1;
-	vmcs_clear_bits(GUEST_CR0, CR0_TS_MASK);
-	if (vcpu->cr0 & CR0_TS_MASK)
-		vmcs_set_bits(GUEST_CR0, CR0_TS_MASK);
+	vmcs_clear_bits(GUEST_CR0, X86_CR0_TS);
+	if (vcpu->cr0 & X86_CR0_TS)
+		vmcs_set_bits(GUEST_CR0, X86_CR0_TS);
 	update_exception_bitmap(vcpu);
 }
@@ -447,7 +447,7 @@ static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
 	if (!vcpu->fpu_active)
 		return;
 	vcpu->fpu_active = 0;
-	vmcs_set_bits(GUEST_CR0, CR0_TS_MASK);
+	vmcs_set_bits(GUEST_CR0, X86_CR0_TS);
 	update_exception_bitmap(vcpu);
 }
@@ -1002,17 +1002,17 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
 	vmx_fpu_deactivate(vcpu);
 
-	if (vcpu->rmode.active && (cr0 & CR0_PE_MASK))
+	if (vcpu->rmode.active && (cr0 & X86_CR0_PE))
 		enter_pmode(vcpu);
 
-	if (!vcpu->rmode.active && !(cr0 & CR0_PE_MASK))
+	if (!vcpu->rmode.active && !(cr0 & X86_CR0_PE))
 		enter_rmode(vcpu);
 
 #ifdef CONFIG_X86_64
 	if (vcpu->shadow_efer & EFER_LME) {
-		if (!is_paging(vcpu) && (cr0 & CR0_PG_MASK))
+		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG))
 			enter_lmode(vcpu);
-		if (is_paging(vcpu) && !(cr0 & CR0_PG_MASK))
+		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG))
 			exit_lmode(vcpu);
 	}
 #endif
@@ -1022,14 +1022,14 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 		    (cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON);
 	vcpu->cr0 = cr0;
 
-	if (!(cr0 & CR0_TS_MASK) || !(cr0 & CR0_PE_MASK))
+	if (!(cr0 & X86_CR0_TS) || !(cr0 & X86_CR0_PE))
 		vmx_fpu_activate(vcpu);
 }
 
 static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 {
 	vmcs_writel(GUEST_CR3, cr3);
-	if (vcpu->cr0 & CR0_PE_MASK)
+	if (vcpu->cr0 & X86_CR0_PE)
 		vmx_fpu_deactivate(vcpu);
 }
@@ -1778,7 +1778,7 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	case 2: /* clts */
 		vcpu_load_rsp_rip(vcpu);
 		vmx_fpu_deactivate(vcpu);
-		vcpu->cr0 &= ~CR0_TS_MASK;
+		vcpu->cr0 &= ~X86_CR0_TS;
 		vmcs_writel(CR0_READ_SHADOW, vcpu->cr0);
 		vmx_fpu_activate(vcpu);
 		skip_emulated_instruction(vcpu);
...