Commit 35181e86 authored by Haozhong Zhang, committed by Paolo Bonzini

KVM: x86: Add a common TSC scaling function

VMX and SVM calculate the TSC scaling ratio using similar logic, so this
patch generalizes it into a common TSC scaling function.
Signed-off-by: Haozhong Zhang <haozhong.zhang@intel.com>
[Inline the multiplication and shift steps into mul_u64_u64_shr.  Remove
 BUG_ON.  - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent ad721883
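
The logic being unified is a fixed-point multiply: the guest TSC is the host TSC multiplied by a scaling ratio that encodes guest_khz/host_khz with a vendor-specific number of fractional bits (32 on SVM, as configured below). As a rough user-space illustration only (not kernel code, with made-up frequencies; the ratio construction itself stays in the vendor-specific set_tsc_khz path), the relationship looks like this:

#include <stdint.h>
#include <stdio.h>

#define FRAC_BITS 32    /* SVM's choice; the fractional-bit count is vendor specific */

int main(void)
{
        uint64_t host_khz  = 2600000;   /* assumed 2.6 GHz host TSC  */
        uint64_t guest_khz = 1300000;   /* assumed 1.3 GHz guest TSC */

        /* ratio = guest_khz / host_khz in fixed point, here 0.5 -> 0x80000000 */
        uint64_t ratio = (guest_khz << FRAC_BITS) / host_khz;

        /* scaling: guest TSC = (host TSC * ratio) >> FRAC_BITS */
        uint64_t host_tsc  = 1000000;
        uint64_t guest_tsc =
                (uint64_t)(((unsigned __int128)host_tsc * ratio) >> FRAC_BITS);

        printf("ratio=%#llx guest_tsc=%llu\n",
               (unsigned long long)ratio, (unsigned long long)guest_tsc);
        return 0;
}
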
arch/x86/include/asm/kvm_host.h
@@ -1238,6 +1238,8 @@ void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
 void kvm_define_shared_msr(unsigned index, u32 msr);
 int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
+u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc);
 unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu);
 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);
...
arch/x86/kvm/svm.c
@@ -212,7 +212,6 @@ static int nested_svm_intercept(struct vcpu_svm *svm);
 static int nested_svm_vmexit(struct vcpu_svm *svm);
 static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
                                       bool has_error_code, u32 error_code);
-static u64 __scale_tsc(u64 ratio, u64 tsc);
 enum {
        VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
@@ -892,21 +891,7 @@ static __init int svm_hardware_setup(void)
                kvm_enable_efer_bits(EFER_FFXSR);
        if (boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
-               u64 max;
                kvm_has_tsc_control = true;
-               /*
-                * Make sure the user can only configure tsc_khz values that
-                * fit into a signed integer.
-                * A min value is not calculated needed because it will always
-                * be 1 on all machines and a value of 0 is used to disable
-                * tsc-scaling for the vcpu.
-                */
-               max = min(0x7fffffffULL, __scale_tsc(tsc_khz, TSC_RATIO_MAX));
-               kvm_max_guest_tsc_khz = max;
                kvm_max_tsc_scaling_ratio = TSC_RATIO_MAX;
                kvm_tsc_scaling_ratio_frac_bits = 32;
        }
@@ -972,31 +957,6 @@ static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
        seg->base = 0;
 }
-static u64 __scale_tsc(u64 ratio, u64 tsc)
-{
-       u64 mult, frac, _tsc;
-       mult = ratio >> 32;
-       frac = ratio & ((1ULL << 32) - 1);
-       _tsc = tsc;
-       _tsc *= mult;
-       _tsc += (tsc >> 32) * frac;
-       _tsc += ((tsc & ((1ULL << 32) - 1)) * frac) >> 32;
-       return _tsc;
-}
-static u64 svm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc)
-{
-       u64 _tsc = tsc;
-       if (vcpu->arch.tsc_scaling_ratio != TSC_RATIO_DEFAULT)
-               _tsc = __scale_tsc(vcpu->arch.tsc_scaling_ratio, tsc);
-       return _tsc;
-}
 static void svm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
 {
        u64 ratio;
@@ -1065,7 +1025,7 @@ static void svm_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool ho
        if (host) {
                if (vcpu->arch.tsc_scaling_ratio != TSC_RATIO_DEFAULT)
                        WARN_ON(adjustment < 0);
-               adjustment = svm_scale_tsc(vcpu, (u64)adjustment);
+               adjustment = kvm_scale_tsc(vcpu, (u64)adjustment);
        }
        svm->vmcb->control.tsc_offset += adjustment;
@@ -1083,7 +1043,7 @@ static u64 svm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
 {
        u64 tsc;
-       tsc = svm_scale_tsc(vcpu, rdtsc());
+       tsc = kvm_scale_tsc(vcpu, rdtsc());
        return target_tsc - tsc;
 }
@@ -3075,7 +3035,7 @@ static u64 svm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
 {
        struct vmcb *vmcb = get_host_vmcb(to_svm(vcpu));
        return vmcb->control.tsc_offset +
-               svm_scale_tsc(vcpu, host_tsc);
+               kvm_scale_tsc(vcpu, host_tsc);
 }
 static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
@@ -3085,7 +3045,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        switch (msr_info->index) {
        case MSR_IA32_TSC: {
                msr_info->data = svm->vmcb->control.tsc_offset +
-                       svm_scale_tsc(vcpu, rdtsc());
+                       kvm_scale_tsc(vcpu, rdtsc());
                break;
        }
...
arch/x86/kvm/x86.c
@@ -1329,6 +1329,33 @@ static void update_ia32_tsc_adjust_msr(struct kvm_vcpu *vcpu, s64 offset)
        vcpu->arch.ia32_tsc_adjust_msr += offset - curr_offset;
 }
+/*
+ * Multiply tsc by a fixed point number represented by ratio.
+ *
+ * The most significant 64-N bits (mult) of ratio represent the
+ * integral part of the fixed point number; the remaining N bits
+ * (frac) represent the fractional part, i.e. ratio represents a fixed
+ * point number (mult + frac * 2^(-N)).
+ *
+ * N equals kvm_tsc_scaling_ratio_frac_bits.
+ */
+static inline u64 __scale_tsc(u64 ratio, u64 tsc)
+{
+       return mul_u64_u64_shr(tsc, ratio, kvm_tsc_scaling_ratio_frac_bits);
+}
+
+u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc)
+{
+       u64 _tsc = tsc;
+       u64 ratio = vcpu->arch.tsc_scaling_ratio;
+
+       if (ratio != kvm_default_tsc_scaling_ratio)
+               _tsc = __scale_tsc(ratio, tsc);
+
+       return _tsc;
+}
+EXPORT_SYMBOL_GPL(kvm_scale_tsc);
+
 void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
 {
        struct kvm *kvm = vcpu->kvm;
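
To make the comment above concrete: with N = 32, a ratio of 0x180000000 has mult = 1 and frac = 0x80000000, i.e. it encodes 1.5, so scaling a TSC value of 1000000 should give 1500000, and the default ratio (1 << 32) must be an identity. A minimal user-space sanity check of that arithmetic, using a 128-bit temporary in place of mul_u64_u64_shr():

#include <stdint.h>
#include <assert.h>

/* Stand-in for mul_u64_u64_shr(): 64x64 -> 128 multiply, then right shift. */
static uint64_t scale(uint64_t tsc, uint64_t ratio, unsigned int frac_bits)
{
        return (uint64_t)(((unsigned __int128)tsc * ratio) >> frac_bits);
}

int main(void)
{
        const unsigned int frac_bits = 32;  /* kvm_tsc_scaling_ratio_frac_bits on SVM */

        assert(scale(1000000, 0x180000000ULL, frac_bits) == 1500000);        /* ratio 1.5     */
        assert(scale(123456789, 1ULL << frac_bits, frac_bits) == 123456789); /* default ratio */
        return 0;
}
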
@@ -7371,8 +7398,19 @@ int kvm_arch_hardware_setup(void)
        if (r != 0)
                return r;
-       if (kvm_has_tsc_control)
+       if (kvm_has_tsc_control) {
+               /*
+                * Make sure the user can only configure tsc_khz values that
+                * fit into a signed integer.
+                * A min value is not calculated because it will always
+                * be 1 on all machines.
+                */
+               u64 max = min(0x7fffffffULL,
+                             __scale_tsc(kvm_max_tsc_scaling_ratio, tsc_khz));
+               kvm_max_guest_tsc_khz = max;
+
                kvm_default_tsc_scaling_ratio = 1ULL << kvm_tsc_scaling_ratio_frac_bits;
+       }
        kvm_init_msr_list();
        return 0;
...
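
The clamp added above mirrors the comment: kvm_max_guest_tsc_khz is the host tsc_khz scaled by the largest ratio the vendor supports, capped so it still fits a signed 32-bit integer. A rough user-space illustration with assumed values (SVM's maximum ratio of roughly 256.0 in 8.32 fixed point, and a 2.6 GHz host); the exact constants belong to the vendor code, not to this sketch:

#include <stdint.h>
#include <stdio.h>

static uint64_t scale(uint64_t ratio, uint64_t val, unsigned int frac_bits)
{
        return (uint64_t)(((unsigned __int128)val * ratio) >> frac_bits);
}

int main(void)
{
        uint64_t max_ratio = 0x000000ffffffffffULL;  /* assumed: SVM's TSC_RATIO_MAX */
        uint64_t tsc_khz   = 2600000;                /* assumed host TSC frequency   */

        uint64_t max = scale(max_ratio, tsc_khz, 32);
        if (max > 0x7fffffffULL)        /* keep the result within a signed 32-bit range */
                max = 0x7fffffffULL;

        /* here: 665599999 kHz, i.e. a guest TSC of up to roughly 665 GHz */
        printf("kvm_max_guest_tsc_khz would be %llu\n", (unsigned long long)max);
        return 0;
}
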
include/linux/kvm_host.h
@@ -1183,4 +1183,5 @@ void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *);
 int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
                                   uint32_t guest_irq, bool set);
 #endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */
 #endif
include/linux/math64.h
@@ -142,6 +142,13 @@ static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
 }
 #endif /* mul_u64_u32_shr */
+
+#ifndef mul_u64_u64_shr
+static inline u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift)
+{
+       return (u64)(((unsigned __int128)a * mul) >> shift);
+}
+#endif /* mul_u64_u64_shr */
 #else
 #ifndef mul_u64_u32_shr
@@ -161,6 +168,50 @@ static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
 }
 #endif /* mul_u64_u32_shr */
+
+#ifndef mul_u64_u64_shr
+static inline u64 mul_u64_u64_shr(u64 a, u64 b, unsigned int shift)
+{
+       union {
+               u64 ll;
+               struct {
+#ifdef __BIG_ENDIAN
+                       u32 high, low;
+#else
+                       u32 low, high;
+#endif
+               } l;
+       } rl, rm, rn, rh, a0, b0;
+       u64 c;
+
+       a0.ll = a;
+       b0.ll = b;
+
+       rl.ll = (u64)a0.l.low * b0.l.low;
+       rm.ll = (u64)a0.l.low * b0.l.high;
+       rn.ll = (u64)a0.l.high * b0.l.low;
+       rh.ll = (u64)a0.l.high * b0.l.high;
+
+       /*
+        * Each of these lines computes a 64-bit intermediate result into "c",
+        * starting at bits 32-95.  The low 32-bits go into the result of the
+        * multiplication, the high 32-bits are carried into the next step.
+        */
+       rl.l.high = c = (u64)rl.l.high + rm.l.low + rn.l.low;
+       rh.l.low = c = (c >> 32) + rm.l.high + rn.l.high + rh.l.low;
+       rh.l.high = (c >> 32) + rh.l.high;
+
+       /*
+        * The 128-bit result of the multiplication is in rl.ll and rh.ll,
+        * shift it right and throw away the high part of the result.
+        */
+       if (shift == 0)
+               return rl.ll;
+       if (shift < 64)
+               return (rl.ll >> shift) | (rh.ll << (64 - shift));
+       return rh.ll >> (shift & 63);
+}
+#endif /* mul_u64_u64_shr */
 #endif
 #endif /* _LINUX_MATH64_H */
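
The fallback above builds the 128-bit product from four 32x32 partial products and propagates the carries by hand, so it must agree with the one-line __int128 version earlier in the header. A small stand-alone user-space check of that equivalence (little-endian limb layout assumed, the fallback renamed so both variants can coexist in one file; the test values are arbitrary):

#include <stdint.h>
#include <assert.h>

typedef uint32_t u32;
typedef uint64_t u64;

/* The open-coded fallback from the patch, renamed and with the
 * little-endian limb order hard-coded for this test. */
static u64 mul_u64_u64_shr_fallback(u64 a, u64 b, unsigned int shift)
{
        union {
                u64 ll;
                struct { u32 low, high; } l;
        } rl, rm, rn, rh, a0, b0;
        u64 c;

        a0.ll = a;
        b0.ll = b;

        rl.ll = (u64)a0.l.low * b0.l.low;       /* weighted at bit 0  */
        rm.ll = (u64)a0.l.low * b0.l.high;      /* weighted at bit 32 */
        rn.ll = (u64)a0.l.high * b0.l.low;      /* weighted at bit 32 */
        rh.ll = (u64)a0.l.high * b0.l.high;     /* weighted at bit 64 */

        rl.l.high = c = (u64)rl.l.high + rm.l.low + rn.l.low;
        rh.l.low = c = (c >> 32) + rm.l.high + rn.l.high + rh.l.low;
        rh.l.high = (c >> 32) + rh.l.high;

        if (shift == 0)
                return rl.ll;
        if (shift < 64)
                return (rl.ll >> shift) | (rh.ll << (64 - shift));
        return rh.ll >> (shift & 63);
}

/* Reference: the __int128 variant from earlier in the header. */
static u64 mul_u64_u64_shr_ref(u64 a, u64 b, unsigned int shift)
{
        return (u64)(((unsigned __int128)a * b) >> shift);
}

int main(void)
{
        const u64 v[] = { 0, 1, 0x80000000ULL, 0xffffffffULL,
                          0x123456789abcdefULL, ~0ULL };
        const unsigned int s[] = { 0, 1, 31, 32, 33, 63 };

        for (unsigned int i = 0; i < sizeof(v) / sizeof(v[0]); i++)
                for (unsigned int j = 0; j < sizeof(v) / sizeof(v[0]); j++)
                        for (unsigned int k = 0; k < sizeof(s) / sizeof(s[0]); k++)
                                assert(mul_u64_u64_shr_fallback(v[i], v[j], s[k]) ==
                                       mul_u64_u64_shr_ref(v[i], v[j], s[k]));
        return 0;
}
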