Commit 019960ae authored by Avi Kivity

KVM: VMX: Don't adjust tsc offset forward

Most Intel hosts have a stable tsc, and playing with the offset only
reduces accuracy.  By limiting tsc offset adjustment only to forward updates,
we effectively disable tsc offset adjustment on these hosts.
Signed-off-by: Avi Kivity <avi@qumranet.com>
parent b8688d51
@@ -519,7 +519,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	u64 phys_addr = __pa(vmx->vmcs);
-	u64 tsc_this, delta;
+	u64 tsc_this, delta, new_offset;
 
 	if (vcpu->cpu != cpu) {
 		vcpu_clear(vmx);
@@ -559,8 +559,11 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	 * Make sure the time stamp counter is monotonous.
 	 */
 	rdtscll(tsc_this);
-	delta = vcpu->arch.host_tsc - tsc_this;
-	vmcs_write64(TSC_OFFSET, vmcs_read64(TSC_OFFSET) + delta);
+	if (tsc_this < vcpu->arch.host_tsc) {
+		delta = vcpu->arch.host_tsc - tsc_this;
+		new_offset = vmcs_read64(TSC_OFFSET) + delta;
+		vmcs_write64(TSC_OFFSET, new_offset);
+	}
 	}
 }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment