Commit fc78f519 authored by Avi Kivity, committed by Marcelo Tosatti

KVM: Add accessor for reading cr4 (or some bits of cr4)

Some bits of cr4 can be owned by the guest on vmx, so when we read them,
we copy them to the vcpu structure.  In preparation for making the set of
guest-owned bits dynamic, use helpers to access these bits so we don't need
to know where the bit resides.

No changes to svm since all bits are host-owned there.
Signed-off-by: Avi Kivity <avi@redhat.com>
parent cdc0e244
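
The shape of the new accessor is easier to see outside the kernel. Below is a minimal, self-contained C model of the pattern the patch introduces: reads of host-owned bits are served from the cached vcpu copy, and only a read that touches a guest-owned bit refreshes the cache from the VMCS. The struct layout, the decache stub, and the vmcs_cr4 field are stand-ins invented for illustration, not kernel API.

#include <stdio.h>

#define X86_CR4_PAE (1UL << 5)	/* host-owned in this example */
#define X86_CR4_PGE (1UL << 7)	/* guest-owned in this example */

struct vcpu {
	unsigned long cr4;			/* cached copy; guest-owned bits may be stale */
	unsigned long cr4_guest_owned_bits;
	unsigned long vmcs_cr4;			/* stand-in for vmcs_readl(GUEST_CR4) */
};

/* Stand-in for vmx_decache_cr4_guest_bits(): refresh only the guest-owned bits. */
static void decache_cr4_guest_bits(struct vcpu *v)
{
	v->cr4 &= ~v->cr4_guest_owned_bits;
	v->cr4 |= v->vmcs_cr4 & v->cr4_guest_owned_bits;
}

/* Mirrors kvm_read_cr4_bits(): decache only if a guest-owned bit is requested. */
static unsigned long read_cr4_bits(struct vcpu *v, unsigned long mask)
{
	if (mask & v->cr4_guest_owned_bits)
		decache_cr4_guest_bits(v);
	return v->cr4 & mask;
}

int main(void)
{
	struct vcpu v = {
		.cr4 = X86_CR4_PAE,		/* guest toggled PGE; the cache is stale */
		.cr4_guest_owned_bits = X86_CR4_PGE,
		.vmcs_cr4 = X86_CR4_PAE | X86_CR4_PGE,
	};

	printf("PAE=%d (served from cache, no VMCS access)\n",
	       !!read_cr4_bits(&v, X86_CR4_PAE));
	printf("PGE=%d (guest-owned, forced a decache)\n",
	       !!read_cr4_bits(&v, X86_CR4_PGE));
	return 0;
}

Run as-is, the first read never consults the "VMCS" while the second one does; that is the fast-path/slow-path split kvm_read_cr4_bits() encodes.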
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -272,6 +272,7 @@ struct kvm_vcpu_arch {
 	unsigned long cr2;
 	unsigned long cr3;
 	unsigned long cr4;
+	unsigned long cr4_guest_owned_bits;
 	unsigned long cr8;
 	u32 hflags;
 	u64 pdptrs[4]; /* pae */
--- a/arch/x86/kvm/kvm_cache_regs.h
+++ b/arch/x86/kvm/kvm_cache_regs.h
@@ -38,4 +38,16 @@ static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
 	return vcpu->arch.pdptrs[index];
 }
 
+static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
+{
+	if (mask & vcpu->arch.cr4_guest_owned_bits)
+		kvm_x86_ops->decache_cr4_guest_bits(vcpu);
+	return vcpu->arch.cr4 & mask;
+}
+
+static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
+{
+	return kvm_read_cr4_bits(vcpu, ~0UL);
+}
+
 #endif
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -2,6 +2,7 @@
 #define __KVM_X86_MMU_H
 
 #include <linux/kvm_host.h>
+#include "kvm_cache_regs.h"
 
 #define PT64_PT_BITS 9
 #define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
@@ -64,12 +65,12 @@ static inline int is_long_mode(struct kvm_vcpu *vcpu)
 
 static inline int is_pae(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.cr4 & X86_CR4_PAE;
+	return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
 }
 
 static inline int is_pse(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.cr4 & X86_CR4_PSE;
+	return kvm_read_cr4_bits(vcpu, X86_CR4_PSE);
 }
 
 static inline int is_paging(struct kvm_vcpu *vcpu)
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1615,8 +1615,10 @@ static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
 
 static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
 {
-	vcpu->arch.cr4 &= KVM_GUEST_CR4_MASK;
-	vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & ~KVM_GUEST_CR4_MASK;
+	ulong cr4_guest_owned_bits = vcpu->arch.cr4_guest_owned_bits;
+
+	vcpu->arch.cr4 &= ~cr4_guest_owned_bits;
+	vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & cr4_guest_owned_bits;
 }
 
 static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
@@ -1661,7 +1663,7 @@ static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
 			   (CPU_BASED_CR3_LOAD_EXITING |
 			    CPU_BASED_CR3_STORE_EXITING));
 		vcpu->arch.cr0 = cr0;
-		vmx_set_cr4(vcpu, vcpu->arch.cr4);
+		vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
 	} else if (!is_paging(vcpu)) {
 		/* From nonpaging to paging */
 		vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
@@ -1669,7 +1671,7 @@ static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
 			   ~(CPU_BASED_CR3_LOAD_EXITING |
 			     CPU_BASED_CR3_STORE_EXITING));
 		vcpu->arch.cr0 = cr0;
-		vmx_set_cr4(vcpu, vcpu->arch.cr4);
+		vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
 	}
 
 	if (!(cr0 & X86_CR0_WP))
@@ -2420,6 +2422,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 
 	vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
 	vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK);
+	vmx->vcpu.arch.cr4_guest_owned_bits = ~KVM_GUEST_CR4_MASK;
 
 	tsc_base = vmx->vcpu.kvm->arch.vm_init_tsc;
 	rdtscll(tsc_this);
@@ -3050,7 +3053,7 @@ static int handle_dr(struct kvm_vcpu *vcpu)
 		vcpu->arch.eff_db[dr] = val;
 		break;
 	case 4 ... 5:
-		if (vcpu->arch.cr4 & X86_CR4_DE)
+		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
 			kvm_queue_exception(vcpu, UD_VECTOR);
 		break;
 	case 6:
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -482,7 +482,7 @@ EXPORT_SYMBOL_GPL(kvm_lmsw);
 
 void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
-	unsigned long old_cr4 = vcpu->arch.cr4;
+	unsigned long old_cr4 = kvm_read_cr4(vcpu);
 	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;
 
 	if (cr4 & CR4_RESERVED_BITS) {
@@ -1899,7 +1899,7 @@ static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
 		return 0;
 	if (mce->status & MCI_STATUS_UC) {
 		if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
-		    !(vcpu->arch.cr4 & X86_CR4_MCE)) {
+		    !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) {
 			printk(KERN_DEBUG "kvm: set_mce: "
 			       "injects mce exception while "
 			       "previous one is in progress!\n");
@@ -3616,7 +3616,6 @@ unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
 {
 	unsigned long value;
 
-	kvm_x86_ops->decache_cr4_guest_bits(vcpu);
 	switch (cr) {
 	case 0:
 		value = vcpu->arch.cr0;
@@ -3628,7 +3627,7 @@ unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
 		value = vcpu->arch.cr3;
 		break;
 	case 4:
-		value = vcpu->arch.cr4;
+		value = kvm_read_cr4(vcpu);
 		break;
 	case 8:
 		value = kvm_get_cr8(vcpu);
@@ -3656,7 +3655,7 @@ void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
 		kvm_set_cr3(vcpu, val);
 		break;
 	case 4:
-		kvm_set_cr4(vcpu, mk_cr_64(vcpu->arch.cr4, val));
+		kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
 		break;
 	case 8:
 		kvm_set_cr8(vcpu, val & 0xfUL);
@@ -4237,11 +4236,10 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 	sregs->gdt.limit = dt.limit;
 	sregs->gdt.base = dt.base;
 
-	kvm_x86_ops->decache_cr4_guest_bits(vcpu);
 	sregs->cr0 = vcpu->arch.cr0;
 	sregs->cr2 = vcpu->arch.cr2;
 	sregs->cr3 = vcpu->arch.cr3;
-	sregs->cr4 = vcpu->arch.cr4;
+	sregs->cr4 = kvm_read_cr4(vcpu);
 	sregs->cr8 = kvm_get_cr8(vcpu);
 	sregs->efer = vcpu->arch.shadow_efer;
 	sregs->apic_base = kvm_get_apic_base(vcpu);
@@ -4737,13 +4735,11 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 	kvm_x86_ops->set_efer(vcpu, sregs->efer);
 	kvm_set_apic_base(vcpu, sregs->apic_base);
 
-	kvm_x86_ops->decache_cr4_guest_bits(vcpu);
-
 	mmu_reset_needed |= vcpu->arch.cr0 != sregs->cr0;
 	kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
 	vcpu->arch.cr0 = sregs->cr0;
 
-	mmu_reset_needed |= vcpu->arch.cr4 != sregs->cr4;
+	mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
 	kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
 
 	if (!is_long_mode(vcpu) && is_pae(vcpu)) {
 		load_pdptrs(vcpu, vcpu->arch.cr3);
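
A closing note on the vmx_vcpu_setup() hunk: in the VMX architecture, a bit set in CR4_GUEST_HOST_MASK is host-owned (a guest write to it causes a VM exit), so the guest-owned set is by construction the complement of the mask the patch writes there. A tiny sanity check of that invariant follows; the KVM_GUEST_CR4_MASK value below is an illustrative placeholder, not the kernel's definition.

#include <assert.h>

int main(void)
{
	/* Illustrative placeholder; the real KVM_GUEST_CR4_MASK lives in vmx.c. */
	const unsigned long KVM_GUEST_CR4_MASK = 0x2060UL;
	const unsigned long cr4_guest_owned_bits = ~KVM_GUEST_CR4_MASK;

	/* Every CR4 bit is owned by exactly one side, guest or host. */
	assert((cr4_guest_owned_bits & KVM_GUEST_CR4_MASK) == 0);
	assert((cr4_guest_owned_bits | KVM_GUEST_CR4_MASK) == ~0UL);
	return 0;
}

Deriving both values from one definition keeps the decache logic and the VMCS mask consistent, which matters once the guest-owned set becomes dynamic.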