Commit 759379dd authored by Zachary Amsden, committed by Avi Kivity

KVM: x86: Add helper functions for time computation

Add helper functions to compute the kernel time and to convert nanoseconds
back to CPU-specific cycles. Note that these must not be called in preemptible
context, as that would mean the kernel could enter the software suspend state
mid-computation, making the operation non-atomic.
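
As a rough sanity check of that conversion, here is a standalone sketch with
illustrative numbers; nsec_to_cycles_demo and the 2.6 GHz rate are hypothetical,
and only the arithmetic mirrors the patch's nsec_to_cycles():

#include <stdio.h>
#include <stdint.h>

#define USEC_PER_SEC 1000000ULL

/* Same scaling as the patch: cycles = nsec * tsc_khz / USEC_PER_SEC.
 * Dividing by USEC_PER_SEC cancels the kHz-versus-ns unit mismatch. */
static uint64_t nsec_to_cycles_demo(uint64_t nsec, uint64_t tsc_khz)
{
	return (nsec * tsc_khz) / USEC_PER_SEC;
}

int main(void)
{
	/* 1 ms (1,000,000 ns) at a 2,600,000 kHz (2.6 GHz) TSC:
	 * 1,000,000 * 2,600,000 / 1,000,000 = 2,600,000 cycles. */
	printf("%llu cycles\n",
	       (unsigned long long)nsec_to_cycles_demo(1000000, 2600000));
	return 0;
}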

Also, convert the KVM_SET_CLOCK / KVM_GET_CLOCK ioctls to use the kernel
time helper; these should be bootbased as well.
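
For illustration, a minimal userspace sketch of those two ioctls; this is a
hedged sketch, not part of the patch, with error handling trimmed, and it
assumes a kernel exposing KVM_CAP_ADJUST_CLOCK:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	struct kvm_clock_data data = { 0 };
	int kvm = open("/dev/kvm", O_RDWR);
	int vm = ioctl(kvm, KVM_CREATE_VM, 0);

	/* KVM_GET_CLOCK: the kernel now reports
	 * get_kernel_ns() + kvmclock_offset, i.e. bootbased time. */
	if (ioctl(vm, KVM_GET_CLOCK, &data) == 0)
		printf("kvmclock: %llu ns\n", (unsigned long long)data.clock);

	/* KVM_SET_CLOCK: kvmclock_offset is recomputed against the same
	 * bootbased kernel time, so suspend time stays accounted for. */
	ioctl(vm, KVM_SET_CLOCK, &data);
	return 0;
}

With both paths going through get_kernel_ns(), the clock the ioctls see keeps
advancing across host suspend.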
Signed-off-by: Zachary Amsden <zamsden@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
parent 48434c20
@@ -893,6 +893,16 @@ static void kvm_set_time_scale(uint32_t tsc_khz, struct pvclock_vcpu_time_info *
 		 hv_clock->tsc_to_system_mul);
 }
 
+static inline u64 get_kernel_ns(void)
+{
+	struct timespec ts;
+
+	WARN_ON(preemptible());
+	ktime_get_ts(&ts);
+	monotonic_to_bootbased(&ts);
+	return timespec_to_ns(&ts);
+}
+
 static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
 
 static inline int kvm_tsc_changes_freq(void)
@@ -904,18 +914,24 @@ static inline int kvm_tsc_changes_freq(void)
 	return ret;
 }
 
+static inline u64 nsec_to_cycles(u64 nsec)
+{
+	WARN_ON(preemptible());
+	if (kvm_tsc_changes_freq())
+		printk_once(KERN_WARNING
+		 "kvm: unreliable cycle conversion on adjustable rate TSC\n");
+	return (nsec * __get_cpu_var(cpu_tsc_khz)) / USEC_PER_SEC;
+}
+
 void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
 {
 	struct kvm *kvm = vcpu->kvm;
 	u64 offset, ns, elapsed;
 	unsigned long flags;
-	struct timespec ts;
 
 	spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
 	offset = data - native_read_tsc();
-	ktime_get_ts(&ts);
-	monotonic_to_bootbased(&ts);
-	ns = timespec_to_ns(&ts);
+	ns = get_kernel_ns();
 	elapsed = ns - kvm->arch.last_tsc_nsec;
 
 	/*
@@ -931,10 +947,9 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
 			offset = kvm->arch.last_tsc_offset;
 			pr_debug("kvm: matched tsc offset for %llu\n", data);
 		} else {
-			u64 tsc_delta = elapsed * __get_cpu_var(cpu_tsc_khz);
-			tsc_delta = tsc_delta / USEC_PER_SEC;
-			offset += tsc_delta;
-			pr_debug("kvm: adjusted tsc offset by %llu\n", tsc_delta);
+			u64 delta = nsec_to_cycles(elapsed);
+			offset += delta;
+			pr_debug("kvm: adjusted tsc offset by %llu\n", delta);
 		}
 		ns = kvm->arch.last_tsc_nsec;
 	}
@@ -951,11 +966,11 @@ EXPORT_SYMBOL_GPL(kvm_write_tsc);
 
 static int kvm_write_guest_time(struct kvm_vcpu *v)
 {
-	struct timespec ts;
 	unsigned long flags;
 	struct kvm_vcpu_arch *vcpu = &v->arch;
 	void *shared_kaddr;
 	unsigned long this_tsc_khz;
+	s64 kernel_ns;
 
 	if ((!vcpu->time_page))
 		return 0;
@@ -963,8 +978,7 @@ static int kvm_write_guest_time(struct kvm_vcpu *v)
 	/* Keep irq disabled to prevent changes to the clock */
 	local_irq_save(flags);
 	kvm_get_msr(v, MSR_IA32_TSC, &vcpu->hv_clock.tsc_timestamp);
-	ktime_get_ts(&ts);
-	monotonic_to_bootbased(&ts);
+	kernel_ns = get_kernel_ns();
 	this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
 	local_irq_restore(flags);
 
@@ -979,9 +993,7 @@ static int kvm_write_guest_time(struct kvm_vcpu *v)
 	}
 
 	/* With all the info we got, fill in the values */
-	vcpu->hv_clock.system_time = ts.tv_nsec +
-				     (NSEC_PER_SEC * (u64)ts.tv_sec) + v->kvm->arch.kvmclock_offset;
+	vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
 	vcpu->hv_clock.flags = 0;
 
 	/*
@@ -3263,7 +3275,6 @@ long kvm_arch_vm_ioctl(struct file *filp,
 		break;
 	}
 	case KVM_SET_CLOCK: {
-		struct timespec now;
 		struct kvm_clock_data user_ns;
 		u64 now_ns;
 		s64 delta;
@@ -3277,19 +3288,16 @@ long kvm_arch_vm_ioctl(struct file *filp,
 			goto out;
 
 		r = 0;
-		ktime_get_ts(&now);
-		now_ns = timespec_to_ns(&now);
+		now_ns = get_kernel_ns();
 		delta = user_ns.clock - now_ns;
 		kvm->arch.kvmclock_offset = delta;
 		break;
 	}
 	case KVM_GET_CLOCK: {
-		struct timespec now;
 		struct kvm_clock_data user_ns;
 		u64 now_ns;
 
-		ktime_get_ts(&now);
-		now_ns = timespec_to_ns(&now);
+		now_ns = get_kernel_ns();
 		user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
 		user_ns.flags = 0;
...