Commit 24647e0a authored by Peter Shier, committed by Paolo Bonzini

KVM: x86: Return updated timer current count register from KVM_GET_LAPIC

kvm_vcpu_ioctl_get_lapic() (which implements the KVM_GET_LAPIC ioctl) does a
bulk copy of the LAPIC registers, but must take into account that the one-shot
and periodic timer current count register is computed on read and is not
present in the stored register state. When restoring LAPIC state (e.g. after
migration), restart timers from their current count values at the time of
save.
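
For context only (not part of this patch): a minimal userspace sketch of the
save/restore flow affected here. The vcpu_fd parameter and the locally defined
LAPIC_TMCCT_OFFSET are illustrative assumptions; the offset mirrors APIC_TMCCT
(0x390), which is not exported through the KVM UAPI headers.

#include <linux/kvm.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>

#define LAPIC_TMCCT_OFFSET 0x390	/* mirrors APIC_TMCCT; not in UAPI headers */

/*
 * Save one vCPU's LAPIC register page. 'vcpu_fd' is an already created KVM
 * vCPU file descriptor (assumed for this sketch). With this patch, the bytes
 * at LAPIC_TMCCT_OFFSET hold the remaining count at the time of the ioctl.
 */
static int save_lapic(int vcpu_fd, struct kvm_lapic_state *state)
{
	uint32_t tmcct;

	if (ioctl(vcpu_fd, KVM_GET_LAPIC, state) < 0)
		return -1;

	memcpy(&tmcct, state->regs + LAPIC_TMCCT_OFFSET, sizeof(tmcct));
	printf("saved timer current count: %u\n", tmcct);
	return 0;
}

/* Feed the same buffer back; KVM restarts the timer from the saved count. */
static int restore_lapic(int vcpu_fd, struct kvm_lapic_state *state)
{
	return ioctl(vcpu_fd, KVM_SET_LAPIC, state);
}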

Note: When a one-shot timer expires, the code in arch/x86/kvm/lapic.c does
not zero the value of the LAPIC initial count register (emulating HW
behavior). If no other timer is run and pending prior to a subsequent
KVM_GET_LAPIC call, the returned register set will include the expired
one-shot initial count. On a subsequent KVM_SET_LAPIC call the code will
see a non-zero initial count and start a new one-shot timer using the
expired timer's count. This is a pre-existing bug and will be addressed
in a separate patch. Thanks to jmattson@google.com for this find.
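
The sequence described above can be illustrated with a small standalone model
(a deliberately simplified stand-in for the set_target_expiration() changes in
the diff below, with made-up numbers, not the kernel code itself):

#include <stdint.h>
#include <stdio.h>

/*
 * Simplified model of the restore path: re-arm from the saved current count
 * when it is usable, otherwise fall back to the full period derived from the
 * initial count.
 */
static uint64_t restore_deadline_ns(uint64_t period_ns, uint64_t saved_count_ns)
{
	if (saved_count_ns == 0 || saved_count_ns > period_ns)
		return period_ns;	/* fall back to the initial count */
	return saved_count_ns;
}

int main(void)
{
	/*
	 * Expired one-shot: the initial count was never zeroed (matching HW),
	 * so the period is still non-zero while the saved current count is 0.
	 * The fallback re-arms a full-length timer -- the bug described above.
	 */
	printf("re-armed for %llu ns\n",
	       (unsigned long long)restore_deadline_ns(1000000, 0));
	return 0;
}
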
Signed-off-by: Peter Shier <pshier@google.com>
Reviewed-by: Jim Mattson <jmattson@google.com>
Reviewed-by: Wanpeng Li <wanpengli@tencent.com>
Message-Id: <20181010225653.238911-1-pshier@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 788109c1
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1648,13 +1648,18 @@ static void start_sw_tscdeadline(struct kvm_lapic *apic)
 	local_irq_restore(flags);
 }
 
+static inline u64 tmict_to_ns(struct kvm_lapic *apic, u32 tmict)
+{
+	return (u64)tmict * APIC_BUS_CYCLE_NS * (u64)apic->divide_count;
+}
+
 static void update_target_expiration(struct kvm_lapic *apic, uint32_t old_divisor)
 {
 	ktime_t now, remaining;
 	u64 ns_remaining_old, ns_remaining_new;
 
-	apic->lapic_timer.period = (u64)kvm_lapic_get_reg(apic, APIC_TMICT)
-		* APIC_BUS_CYCLE_NS * apic->divide_count;
+	apic->lapic_timer.period =
+			tmict_to_ns(apic, kvm_lapic_get_reg(apic, APIC_TMICT));
 	limit_periodic_timer_frequency(apic);
 
 	now = ktime_get();
@@ -1672,14 +1677,15 @@ static void update_target_expiration(struct kvm_lapic *apic, uint32_t old_divisor)
 	apic->lapic_timer.target_expiration = ktime_add_ns(now, ns_remaining_new);
 }
 
-static bool set_target_expiration(struct kvm_lapic *apic)
+static bool set_target_expiration(struct kvm_lapic *apic, u32 count_reg)
 {
 	ktime_t now;
 	u64 tscl = rdtsc();
+	s64 deadline;
 
 	now = ktime_get();
-	apic->lapic_timer.period = (u64)kvm_lapic_get_reg(apic, APIC_TMICT)
-		* APIC_BUS_CYCLE_NS * apic->divide_count;
+	apic->lapic_timer.period =
+			tmict_to_ns(apic, kvm_lapic_get_reg(apic, APIC_TMICT));
 
 	if (!apic->lapic_timer.period) {
 		apic->lapic_timer.tscdeadline = 0;
@@ -1687,10 +1693,32 @@ static bool set_target_expiration(struct kvm_lapic *apic)
 	}
 
 	limit_periodic_timer_frequency(apic);
+	deadline = apic->lapic_timer.period;
+
+	if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic)) {
+		if (unlikely(count_reg != APIC_TMICT)) {
+			deadline = tmict_to_ns(apic,
+				     kvm_lapic_get_reg(apic, count_reg));
+			if (unlikely(deadline <= 0))
+				deadline = apic->lapic_timer.period;
+			else if (unlikely(deadline > apic->lapic_timer.period)) {
+				pr_info_ratelimited(
+				    "kvm: vcpu %i: requested lapic timer restore with "
+				    "starting count register %#x=%u (%lld ns) > initial count (%lld ns). "
+				    "Using initial count to start timer.\n",
+				    apic->vcpu->vcpu_id,
+				    count_reg,
+				    kvm_lapic_get_reg(apic, count_reg),
+				    deadline, apic->lapic_timer.period);
+				kvm_lapic_set_reg(apic, count_reg, 0);
+				deadline = apic->lapic_timer.period;
+			}
+		}
+	}
 
 	apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
-		nsec_to_cycles(apic->vcpu, apic->lapic_timer.period);
-	apic->lapic_timer.target_expiration = ktime_add_ns(now, apic->lapic_timer.period);
+		nsec_to_cycles(apic->vcpu, deadline);
+	apic->lapic_timer.target_expiration = ktime_add_ns(now, deadline);
 
 	return true;
 }
@@ -1872,17 +1900,22 @@ void kvm_lapic_restart_hv_timer(struct kvm_vcpu *vcpu)
 	restart_apic_timer(apic);
 }
 
-static void start_apic_timer(struct kvm_lapic *apic)
+static void __start_apic_timer(struct kvm_lapic *apic, u32 count_reg)
 {
 	atomic_set(&apic->lapic_timer.pending, 0);
 
 	if ((apic_lvtt_period(apic) || apic_lvtt_oneshot(apic))
-	    && !set_target_expiration(apic))
+	    && !set_target_expiration(apic, count_reg))
 		return;
 
 	restart_apic_timer(apic);
 }
 
+static void start_apic_timer(struct kvm_lapic *apic)
+{
+	__start_apic_timer(apic, APIC_TMICT);
+}
+
 static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
 {
 	bool lvt0_in_nmi_mode = apic_lvt_nmi_mode(lvt0_val);
@@ -2493,6 +2526,14 @@ static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
 int kvm_apic_get_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
 {
 	memcpy(s->regs, vcpu->arch.apic->regs, sizeof(*s));
+
+	/*
+	 * Get calculated timer current count for remaining timer period (if
+	 * any) and store it in the returned register set.
+	 */
+	__kvm_lapic_set_reg(s->regs, APIC_TMCCT,
+			    __apic_read(vcpu->arch.apic, APIC_TMCCT));
+
 	return kvm_apic_state_fixup(vcpu, s, false);
 }
 
@@ -2520,7 +2561,7 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
 	apic_update_lvtt(apic);
 	apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
 	update_divide_count(apic);
-	start_apic_timer(apic);
+	__start_apic_timer(apic, APIC_TMCCT);
 	kvm_apic_update_apicv(vcpu);
 	apic->highest_isr_cache = -1;
 	if (vcpu->arch.apicv_active) {
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -161,9 +161,14 @@ static inline u32 kvm_lapic_get_reg(struct kvm_lapic *apic, int reg_off)
 	return *((u32 *) (apic->regs + reg_off));
 }
 
+static inline void __kvm_lapic_set_reg(char *regs, int reg_off, u32 val)
+{
+	*((u32 *) (regs + reg_off)) = val;
+}
+
 static inline void kvm_lapic_set_reg(struct kvm_lapic *apic, int reg_off, u32 val)
 {
-	*((u32 *) (apic->regs + reg_off)) = val;
+	__kvm_lapic_set_reg(apic->regs, reg_off, val);
 }
 
 extern struct static_key kvm_no_apic_vcpu;
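
For reference, the value that __apic_read(..., APIC_TMCCT) stores into the
returned register set in kvm_apic_get_state() above is essentially the inverse
of the tmict_to_ns() helper introduced by this patch: the time remaining until
the timer's target expiration converted back into bus cycles. A rough
standalone model follows; the helper names, parameters, and numbers are
illustrative assumptions, not the kernel's apic_get_tmcct() verbatim.

#include <stdint.h>
#include <stdio.h>

/* Mirrors the shape of tmict_to_ns() in the patch above. */
static uint64_t count_to_ns(uint32_t count, uint64_t bus_cycle_ns,
			    uint32_t divide_count)
{
	return (uint64_t)count * bus_cycle_ns * divide_count;
}

/* Inverse conversion: remaining time back to a current-count value. */
static uint32_t remaining_to_count(uint64_t remaining_ns, uint64_t bus_cycle_ns,
				   uint32_t divide_count)
{
	return (uint32_t)(remaining_ns / (bus_cycle_ns * divide_count));
}

int main(void)
{
	/* Made-up example: divide-by-2, initial count 100000, half elapsed. */
	uint64_t period = count_to_ns(100000, 1, 2);

	printf("half way through: count reads %u\n",
	       remaining_to_count(period / 2, 1, 2));
	return 0;
}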