Commit 9e793c5e authored by Andy Lutomirski, committed by Ben Hutchings

x86,kvm,vmx: Preserve CR4 across VM entry

commit d974baa3 upstream.

CR4 isn't constant; at least the TSD and PCE bits can vary.
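
One way the TSD bit varies on Linux (a hedged userspace sketch, assuming an
x86 host with prctl(2) PR_SET_TSC support; not part of the fix itself): a
task can ask that RDTSC fault, which the kernel implements by setting
CR4.TSD while that task runs, so host CR4 legitimately differs per task:

	/* Sketch: make RDTSC fault for this task, which flips CR4.TSD. */
	#include <stdio.h>
	#include <sys/prctl.h>

	int main(void)
	{
		/* Deliver SIGSEGV on RDTSC for this task; the kernel
		 * implements this by setting CR4.TSD on context switch. */
		if (prctl(PR_SET_TSC, PR_TSC_SIGSEGV, 0, 0, 0))
			perror("prctl(PR_SET_TSC)");
		/* Host CR4 now no longer matches a value cached when
		 * the VMCS was set up. */
		return 0;
	}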

TBH, treating CR0 and CR3 as constant scares me a bit, too, but it looks
like it's correct.

This adds a branch and a read from cr4 to each vm entry.  Because it is
extremely likely that consecutive entries into the same vcpu will have
the same host cr4 value, this fixes up the vmcs instead of restoring cr4
after the fact.  A subsequent patch will add a kernel-wide cr4 shadow,
reducing the overhead in the common case to just two memory reads and a
branch.
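
Distilled from the vmx_vcpu_run() hunk below, the fast path is a
cached-compare-and-fixup (a sketch of the pattern; names as in the diff):

	/* At each VM entry: rewrite HOST_CR4 only if the live CR4 has
	 * drifted from the value last written to the VMCS. Common case:
	 * one CR4 read plus one well-predicted branch, no VMWRITE. */
	cr4 = read_cr4();
	if (unlikely(cr4 != vmx->host_state.vmcs_host_cr4)) {
		vmcs_writel(HOST_CR4, cr4);		/* resync the VMCS */
		vmx->host_state.vmcs_host_cr4 = cr4;	/* remember it */
	}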

Signed-off-by: Andy Lutomirski <luto@amacapital.net>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Cc: stable@vger.kernel.org
Cc: Petr Matousek <pmatouse@redhat.com>
Cc: Gleb Natapov <gleb@kernel.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
[bwh: Backported to 3.2:
 - Adjust context
 - Add struct vcpu_vmx *vmx parameter to vmx_set_constant_host_state(), done
   upstream in commit a547c6db ("KVM: VMX: Enable acknowledge interupt
   on vmexit")]
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
parent 3a8c709b
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -390,6 +390,7 @@ struct vcpu_vmx {
 		u16           fs_sel, gs_sel, ldt_sel;
 		int           gs_ldt_reload_needed;
 		int           fs_reload_needed;
+		unsigned long vmcs_host_cr4;	/* May not match real cr4 */
 	} host_state;
 	struct {
 		int vm86_active;
@@ -3629,16 +3630,21 @@ static void vmx_disable_intercept_for_msr(u32 msr, bool longmode_only)
  * Note that host-state that does change is set elsewhere. E.g., host-state
  * that is set differently for each CPU is set in vmx_vcpu_load(), not here.
  */
-static void vmx_set_constant_host_state(void)
+static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
 {
 	u32 low32, high32;
 	unsigned long tmpl;
 	struct desc_ptr dt;
+	unsigned long cr4;
 
 	vmcs_writel(HOST_CR0, read_cr0() | X86_CR0_TS);  /* 22.2.3 */
-	vmcs_writel(HOST_CR4, read_cr4());  /* 22.2.3, 22.2.5 */
 	vmcs_writel(HOST_CR3, read_cr3());  /* 22.2.3  FIXME: shadow tables */
 
+	/* Save the most likely value for this task's CR4 in the VMCS. */
+	cr4 = read_cr4();
+	vmcs_writel(HOST_CR4, cr4);			/* 22.2.3, 22.2.5 */
+	vmx->host_state.vmcs_host_cr4 = cr4;
+
 	vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS);  /* 22.2.4 */
 	vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
 	vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
@@ -3760,7 +3766,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 	vmcs_write16(HOST_FS_SELECTOR, 0);            /* 22.2.4 */
 	vmcs_write16(HOST_GS_SELECTOR, 0);            /* 22.2.4 */
-	vmx_set_constant_host_state();
+	vmx_set_constant_host_state(vmx);
 #ifdef CONFIG_X86_64
 	rdmsrl(MSR_FS_BASE, a);
 	vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */
@@ -6108,6 +6114,7 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
 static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	unsigned long cr4;
 
 	if (is_guest_mode(vcpu) && !vmx->nested.nested_run_pending) {
 		struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
@@ -6138,6 +6145,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
 		vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
 
+	cr4 = read_cr4();
+	if (unlikely(cr4 != vmx->host_state.vmcs_host_cr4)) {
+		vmcs_writel(HOST_CR4, cr4);
+		vmx->host_state.vmcs_host_cr4 = cr4;
+	}
+
 	/* When single-stepping over STI and MOV SS, we must clear the
 	 * corresponding interruptibility bits in the guest state. Otherwise
 	 * vmentry fails as it then expects bit 14 (BS) in pending debug
@@ -6596,7 +6609,7 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 	 * Other fields are different per CPU, and will be set later when
 	 * vmx_vcpu_load() is called, and when vmx_save_host_state() is called.
 	 */
-	vmx_set_constant_host_state();
+	vmx_set_constant_host_state(vmx);
 
 	/*
 	 * HOST_RSP is normally set correctly in vmx_vcpu_run() just before