Commit 7caf9571 authored by David Woodhouse, committed by Paolo Bonzini

KVM: x86/xen: Use gfn_to_pfn_cache for vcpu_info

Currently, the fast path of kvm_xen_set_evtchn_fast() doesn't set the
index bits in the target vCPU's evtchn_pending_sel, because it only has
a userspace virtual address with which to do so. It just sets them in
a kernel-side shadow copy (vcpu->arch.xen.evtchn_pending_sel), and
kvm_xen_has_interrupt() then completes the delivery to
the actual vcpu_info structure when the vCPU runs.
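
As a minimal, self-contained userspace model of that two-step delivery (illustrative only, with made-up names; this is not the kernel code itself): the fast path merely ORs the index bits into a kernel-side shadow word, and a later, non-atomic step folds them into the guest-visible vcpu_info.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct vcpu_info_model {
	uint64_t evtchn_pending_sel;		/* what the guest actually sees */
};

static _Atomic uint64_t shadow_pending_sel;	/* kernel-side shadow copy */

/* "Fast path": safe in atomic context, never touches guest memory. */
static void set_evtchn_fast(unsigned int idx)
{
	atomic_fetch_or(&shadow_pending_sel, (uint64_t)1 << idx);
}

/* Deferred step: runs later, when the vCPU is about to enter the guest. */
static void inject_pending_events(struct vcpu_info_model *vi)
{
	uint64_t sel = atomic_exchange(&shadow_pending_sel, 0);

	if (sel)
		vi->evtchn_pending_sel |= sel;	/* complete the delivery */
}

int main(void)
{
	struct vcpu_info_model vi = { 0 };

	set_evtchn_fast(3);		/* e.g. from an IRQ-disabled context */
	inject_pending_events(&vi);	/* e.g. from the vcpu_run() loop */
	printf("evtchn_pending_sel = %#jx\n", (uintmax_t)vi.evtchn_pending_sel);
	return 0;
}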

Using a gfn_to_pfn_cache allows kvm_xen_set_evtchn_fast() to do the full
delivery in the common case.
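
The common case then has roughly the shape sketched below. This is illustrative, not the actual xen.c change (that part of the diff is collapsed further down and also handles the 32-bit compat vcpu_info layout); it assumes the kvm_gfn_to_pfn_cache_check() helper used elsewhere in this series, with kvm, vcpu and idx supplied by the caller. While the cache is valid, the pinned kernel mapping gpc->khva can be written even from atomic context, and only an invalid cache forces the old deferred path.

	struct gfn_to_pfn_cache *gpc = &vcpu->arch.xen.vcpu_info_cache;
	unsigned long flags;

	read_lock_irqsave(&gpc->lock, flags);
	if (kvm_gfn_to_pfn_cache_check(kvm, gpc, gpc->gpa,
				       sizeof(struct vcpu_info))) {
		struct vcpu_info *vi = gpc->khva;

		/* Common case: set the bit directly in the mapped vcpu_info. */
		set_bit(idx, (unsigned long *)&vi->evtchn_pending_sel);
	} else {
		/* Fallback: stash the bit and let the vCPU thread deliver it. */
		set_bit(idx, (unsigned long *)&vcpu->arch.xen.evtchn_pending_sel);
		kvm_make_request(KVM_REQ_EVENT, vcpu);
	}
	read_unlock_irqrestore(&gpc->lock, flags);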

Clean up the fallback case too, by moving the deferred delivery out into
a separate kvm_xen_inject_pending_events() function which isn't ever
called in atomic contexts as __kvm_xen_has_interrupt() is.
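
A sketch of what that non-atomic fallback can look like with the check/refresh retry pattern (again illustrative, assuming the kvm_gfn_to_pfn_cache_check()/kvm_gfn_to_pfn_cache_refresh() helpers from this series rather than quoting the collapsed xen.c code, and ignoring the compat layout):

	struct gfn_to_pfn_cache *gpc = &vcpu->arch.xen.vcpu_info_cache;
	struct vcpu_info *vi;
	unsigned long flags;

	if (!READ_ONCE(vcpu->arch.xen.evtchn_pending_sel))
		return;

	read_lock_irqsave(&gpc->lock, flags);
	while (!kvm_gfn_to_pfn_cache_check(vcpu->kvm, gpc, gpc->gpa,
					   sizeof(struct vcpu_info))) {
		read_unlock_irqrestore(&gpc->lock, flags);

		/* May sleep to fault the page back in: hence never in atomic context. */
		if (kvm_gfn_to_pfn_cache_refresh(vcpu->kvm, gpc, gpc->gpa,
						 sizeof(struct vcpu_info)))
			return;

		read_lock_irqsave(&gpc->lock, flags);
	}

	/* Mapping is valid and locked: fold the deferred bits into vcpu_info. */
	vi = gpc->khva;
	vi->evtchn_pending_sel |= xchg(&vcpu->arch.xen.evtchn_pending_sel, 0);
	WRITE_ONCE(vi->evtchn_upcall_pending, 1);

	read_unlock_irqrestore(&gpc->lock, flags);
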
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20220303154127.202856-6-dwmw2@infradead.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 916d3608
@@ -606,9 +606,8 @@ struct kvm_vcpu_hv {
 struct kvm_vcpu_xen {
 	u64 hypercall_rip;
 	u32 current_runstate;
-	bool vcpu_info_set;
 	bool vcpu_time_info_set;
-	struct gfn_to_hva_cache vcpu_info_cache;
+	struct gfn_to_pfn_cache vcpu_info_cache;
 	struct gfn_to_hva_cache vcpu_time_info_cache;
 	struct gfn_to_pfn_cache runstate_cache;
 	u64 last_steal;

@@ -3158,8 +3158,8 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 	if (vcpu->pv_time.active)
 		kvm_setup_guest_pvclock(v, &vcpu->pv_time, 0);
-	if (vcpu->xen.vcpu_info_set)
-		kvm_setup_pvclock_page(v, &vcpu->xen.vcpu_info_cache,
+	if (vcpu->xen.vcpu_info_cache.active)
+		kvm_setup_guest_pvclock(v, &vcpu->xen.vcpu_info_cache,
				       offsetof(struct compat_vcpu_info, time));
 	if (vcpu->xen.vcpu_time_info_set)
 		kvm_setup_pvclock_page(v, &vcpu->xen.vcpu_time_info_cache, 0);

@@ -10424,6 +10424,9 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
 			break;
 		kvm_clear_request(KVM_REQ_UNBLOCK, vcpu);
+		if (kvm_xen_has_pending_events(vcpu))
+			kvm_xen_inject_pending_events(vcpu);
+
 		if (kvm_cpu_has_pending_timer(vcpu))
 			kvm_inject_pending_timer_irqs(vcpu);

@@ -12236,6 +12239,9 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
 	    kvm_x86_ops.nested_ops->hv_timer_pending(vcpu))
 		return true;
 
+	if (kvm_xen_has_pending_events(vcpu))
+		return true;
+
 	return false;
 }

[One file's diff in this commit is collapsed and not shown here.]

@@ -15,6 +15,7 @@
 extern struct static_key_false_deferred kvm_xen_enabled;
 
 int __kvm_xen_has_interrupt(struct kvm_vcpu *vcpu);
+void kvm_xen_inject_pending_events(struct kvm_vcpu *vcpu);
 int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data);
 int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data);
 int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);

@@ -46,11 +47,19 @@ static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm)
 static inline int kvm_xen_has_interrupt(struct kvm_vcpu *vcpu)
 {
 	if (static_branch_unlikely(&kvm_xen_enabled.key) &&
-	    vcpu->arch.xen.vcpu_info_set && vcpu->kvm->arch.xen.upcall_vector)
+	    vcpu->arch.xen.vcpu_info_cache.active &&
+	    vcpu->kvm->arch.xen.upcall_vector)
 		return __kvm_xen_has_interrupt(vcpu);
 
 	return 0;
 }
 
+static inline bool kvm_xen_has_pending_events(struct kvm_vcpu *vcpu)
+{
+	return static_branch_unlikely(&kvm_xen_enabled.key) &&
+		vcpu->arch.xen.evtchn_pending_sel;
+}
+
 #else
 static inline int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data)
 {

@@ -83,6 +92,15 @@ static inline int kvm_xen_has_interrupt(struct kvm_vcpu *vcpu)
 {
 	return 0;
 }
 
+static inline void kvm_xen_inject_pending_events(struct kvm_vcpu *vcpu)
+{
+}
+
+static inline bool kvm_xen_has_pending_events(struct kvm_vcpu *vcpu)
+{
+	return false;
+}
+
 #endif
 
 int kvm_xen_hypercall(struct kvm_vcpu *vcpu);