Commit 7725b894 authored by Dongxiao Xu, committed by Avi Kivity

KVM: VMX: Define new functions to wrap direct calls to asm code

Define vmcs_load() and kvm_cpu_vmxon() so that the asm code is no longer
called directly. Also move the VMXE bit operation out of kvm_cpu_vmxoff().
Signed-off-by: Dongxiao Xu <dongxiao.xu@intel.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
parent 6859762e
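
In short, the patch wraps the VMPTRLD/VMXON asm sequences in helpers and leaves CR4.VMXE management to the callers. The condensed sketch below shows the resulting enable/disable paths for orientation only; anything outside the hunks that follow (the per-CPU VMXON region, the IA32_FEATURE_CONTROL checks, the return value) is assumed context, not part of this patch.

static int hardware_enable(void *garbage)
{
	/* ... per-cpu setup and IA32_FEATURE_CONTROL checks elided (unchanged by this patch) ... */
	write_cr4(read_cr4() | X86_CR4_VMXE);	/* FIXME: not cpu hotplug safe */
	kvm_cpu_vmxon(phys_addr);		/* VMXON via the new wrapper; phys_addr is the per-cpu VMXON region (assumed context) */

	ept_sync_global();
	return 0;				/* assumed; not shown in the hunks below */
}

static void hardware_disable(void *garbage)
{
	vmclear_local_vcpus();
	kvm_cpu_vmxoff();			/* now executes VMXOFF only */
	write_cr4(read_cr4() & ~X86_CR4_VMXE);	/* VMXE clear moved here from kvm_cpu_vmxoff() */
}

Keeping the CR4 update out of kvm_cpu_vmxoff() presumably lets the VMXOFF wrapper be called in contexts where CR4.VMXE should be left as-is.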
@@ -453,6 +453,19 @@ static void vmcs_clear(struct vmcs *vmcs)
 		       vmcs, phys_addr);
 }
 
+static void vmcs_load(struct vmcs *vmcs)
+{
+	u64 phys_addr = __pa(vmcs);
+	u8 error;
+
+	asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) "; setna %0"
+			: "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
+			: "cc", "memory");
+	if (error)
+		printk(KERN_ERR "kvm: vmptrld %p/%llx fail\n",
+		       vmcs, phys_addr);
+}
+
 static void __vcpu_clear(void *arg)
 {
 	struct vcpu_vmx *vmx = arg;
@@ -830,7 +843,6 @@ static void vmx_load_host_state(struct vcpu_vmx *vmx)
 static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	u64 phys_addr = __pa(vmx->vmcs);
 	u64 tsc_this, delta, new_offset;
 
 	if (vcpu->cpu != cpu) {
@@ -844,15 +856,8 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	}
 
 	if (per_cpu(current_vmcs, cpu) != vmx->vmcs) {
-		u8 error;
-
 		per_cpu(current_vmcs, cpu) = vmx->vmcs;
-		asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) "; setna %0"
-			      : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
-			      : "cc");
-		if (error)
-			printk(KERN_ERR "kvm: vmptrld %p/%llx fail\n",
-			       vmx->vmcs, phys_addr);
+		vmcs_load(vmx->vmcs);
 	}
 
 	if (vcpu->cpu != cpu) {
@@ -1288,6 +1293,13 @@ static __init int vmx_disabled_by_bios(void)
 	/* locked but not enabled */
 }
 
+static void kvm_cpu_vmxon(u64 addr)
+{
+	asm volatile (ASM_VMX_VMXON_RAX
+			: : "a"(&addr), "m"(addr)
+			: "memory", "cc");
+}
+
 static int hardware_enable(void *garbage)
 {
 	int cpu = raw_smp_processor_id();
@@ -1310,9 +1322,7 @@ static int hardware_enable(void *garbage)
 		wrmsrl(MSR_IA32_FEATURE_CONTROL, old | test_bits);
 	}
 	write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */
-	asm volatile (ASM_VMX_VMXON_RAX
-		      : : "a"(&phys_addr), "m"(phys_addr)
-		      : "memory", "cc");
+	kvm_cpu_vmxon(phys_addr);
 
 	ept_sync_global();
 
@@ -1336,13 +1346,13 @@ static void vmclear_local_vcpus(void)
 static void kvm_cpu_vmxoff(void)
 {
 	asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc");
-	write_cr4(read_cr4() & ~X86_CR4_VMXE);
 }
 
 static void hardware_disable(void *garbage)
 {
 	vmclear_local_vcpus();
 	kvm_cpu_vmxoff();
+	write_cr4(read_cr4() & ~X86_CR4_VMXE);
 }
 
 static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
...