Commit e122d7a1 authored by Paolo Bonzini

Merge tag 'kvm-x86-xen-6.7' of https://github.com/kvm-x86/linux into HEAD

KVM x86 Xen changes for 6.7:

 - Omit "struct kvm_vcpu_xen" entirely when CONFIG_KVM_XEN=n.

 - Use the fast path directly from the timer callback when delivering Xen timer
   events.  Avoid the problematic races with using the fast path by ensuring
   the hrtimer isn't running when (re)starting the timer or saving the timer
   information (for userspace).

 - Follow the lead of upstream Xen and ignore the VCPU_SSHOTTMR_future flag.
parents f0f59d06 409f2e92
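For context on the second bullet, the timer state that the KVM_XEN_VCPU_ATTR_TYPE_TIMER hunk below snapshots (port, priority, expires_ns) is what a VMM saves on the source and restores on the destination during migration. A minimal userspace sketch of that save/restore flow, not part of this commit; the helper names are hypothetical, and it assumes vcpu_fd refers to a vCPU of a VM created with Xen HVM support enabled:

```c
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch only: snapshot the Xen singleshot timer state of one vCPU.
 * The kernel side below cancels the hrtimer and delivers any pending
 * timer event before filling in the struct, so the snapshot is
 * consistent. */
int save_xen_timer(int vcpu_fd, struct kvm_xen_vcpu_attr *state)
{
	memset(state, 0, sizeof(*state));
	state->type = KVM_XEN_VCPU_ATTR_TYPE_TIMER;
	return ioctl(vcpu_fd, KVM_XEN_VCPU_GET_ATTR, state);
}

/* Sketch only: re-arm the timer on the destination vCPU.  A non-zero
 * expires_ns restarts the hrtimer at the saved guest-epoch deadline. */
int restore_xen_timer(int vcpu_fd, struct kvm_xen_vcpu_attr *state)
{
	return ioctl(vcpu_fd, KVM_XEN_VCPU_SET_ATTR, state);
}
```

If the saved deadline has already passed, the restored timer may fire immediately; as the new comment in the GET_ATTR hunk notes, re-asserting an already pending event channel is idempotent, so this is harmless.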
@@ -687,6 +687,7 @@ struct kvm_hypervisor_cpuid {
 	u32 limit;
 };
 
+#ifdef CONFIG_KVM_XEN
 /* Xen HVM per vcpu emulation context */
 struct kvm_vcpu_xen {
 	u64 hypercall_rip;
@@ -709,6 +710,7 @@ struct kvm_vcpu_xen {
 	struct timer_list poll_timer;
 
 	struct kvm_hypervisor_cpuid cpuid;
 };
+#endif
 
 struct kvm_queued_exception {
 	bool pending;
@@ -937,8 +939,9 @@ struct kvm_vcpu_arch {
 	bool hyperv_enabled;
 	struct kvm_vcpu_hv *hyperv;
+#ifdef CONFIG_KVM_XEN
 	struct kvm_vcpu_xen xen;
+#endif
 
 	cpumask_var_t wbinvd_dirty_mask;
 	unsigned long last_retry_eip;
@@ -448,7 +448,9 @@ static int kvm_set_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *e2,
 	vcpu->arch.cpuid_nent = nent;
 	vcpu->arch.kvm_cpuid = kvm_get_hypervisor_cpuid(vcpu, KVM_SIGNATURE);
+#ifdef CONFIG_KVM_XEN
 	vcpu->arch.xen.cpuid = kvm_get_hypervisor_cpuid(vcpu, XEN_SIGNATURE);
+#endif
 	kvm_vcpu_after_set_cpuid(vcpu);
 	return 0;
@@ -3240,11 +3240,13 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 	if (vcpu->pv_time.active)
 		kvm_setup_guest_pvclock(v, &vcpu->pv_time, 0);
+#ifdef CONFIG_KVM_XEN
 	if (vcpu->xen.vcpu_info_cache.active)
 		kvm_setup_guest_pvclock(v, &vcpu->xen.vcpu_info_cache,
					offsetof(struct compat_vcpu_info, time));
 	if (vcpu->xen.vcpu_time_info_cache.active)
 		kvm_setup_guest_pvclock(v, &vcpu->xen.vcpu_time_info_cache, 0);
+#endif
 	kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock);
 
 	return 0;
 }
@@ -134,9 +134,23 @@ static enum hrtimer_restart xen_timer_callback(struct hrtimer *timer)
 {
 	struct kvm_vcpu *vcpu = container_of(timer, struct kvm_vcpu,
					     arch.xen.timer);
+	struct kvm_xen_evtchn e;
+	int rc;
+
 	if (atomic_read(&vcpu->arch.xen.timer_pending))
 		return HRTIMER_NORESTART;
 
+	e.vcpu_id = vcpu->vcpu_id;
+	e.vcpu_idx = vcpu->vcpu_idx;
+	e.port = vcpu->arch.xen.timer_virq;
+	e.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL;
+
+	rc = kvm_xen_set_evtchn_fast(&e, vcpu->kvm);
+	if (rc != -EWOULDBLOCK) {
+		vcpu->arch.xen.timer_expires = 0;
+		return HRTIMER_NORESTART;
+	}
+
 	atomic_inc(&vcpu->arch.xen.timer_pending);
 	kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
 	kvm_vcpu_kick(vcpu);
@@ -146,6 +160,14 @@ static enum hrtimer_restart xen_timer_callback(struct hrtimer *timer)
 static void kvm_xen_start_timer(struct kvm_vcpu *vcpu, u64 guest_abs, s64 delta_ns)
 {
+	/*
+	 * Avoid races with the old timer firing. Checking timer_expires
+	 * to avoid calling hrtimer_cancel() will only have false positives
+	 * so is fine.
+	 */
+	if (vcpu->arch.xen.timer_expires)
+		hrtimer_cancel(&vcpu->arch.xen.timer);
+
 	atomic_set(&vcpu->arch.xen.timer_pending, 0);
 	vcpu->arch.xen.timer_expires = guest_abs;
@@ -1019,9 +1041,36 @@ int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
 		break;
 
 	case KVM_XEN_VCPU_ATTR_TYPE_TIMER:
+		/*
+		 * Ensure a consistent snapshot of state is captured, with a
+		 * timer either being pending, or the event channel delivered
+		 * to the corresponding bit in the shared_info. Not still
+		 * lurking in the timer_pending flag for deferred delivery.
+		 * Purely as an optimisation, if the timer_expires field is
+		 * zero, that means the timer isn't active (or even in the
+		 * timer_pending flag) and there is no need to cancel it.
+		 */
+		if (vcpu->arch.xen.timer_expires) {
+			hrtimer_cancel(&vcpu->arch.xen.timer);
+			kvm_xen_inject_timer_irqs(vcpu);
+		}
+
 		data->u.timer.port = vcpu->arch.xen.timer_virq;
 		data->u.timer.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL;
 		data->u.timer.expires_ns = vcpu->arch.xen.timer_expires;
+
+		/*
+		 * The hrtimer may trigger and raise the IRQ immediately,
+		 * while the returned state causes it to be set up and
+		 * raised again on the destination system after migration.
+		 * That's fine, as the guest won't even have had a chance
+		 * to run and handle the interrupt. Asserting an already
+		 * pending event channel is idempotent.
+		 */
+		if (vcpu->arch.xen.timer_expires)
+			hrtimer_start_expires(&vcpu->arch.xen.timer,
+					      HRTIMER_MODE_ABS_HARD);
+
 		r = 0;
 		break;
@@ -1374,12 +1423,8 @@ static bool kvm_xen_hcall_vcpu_op(struct kvm_vcpu *vcpu, bool longmode, int cmd,
 			return true;
 		}
 
+		/* A delta <= 0 results in an immediate callback, which is what we want */
 		delta = oneshot.timeout_abs_ns - get_kvmclock_ns(vcpu->kvm);
-		if ((oneshot.flags & VCPU_SSHOTTMR_future) && delta < 0) {
-			*r = -ETIME;
-			return true;
-		}
-
 		kvm_xen_start_timer(vcpu, oneshot.timeout_abs_ns, delta);
 		*r = 0;
 		return true;