Commit 92c58965 authored by David Woodhouse, committed by Paolo Bonzini

KVM: x86/xen: Use kvm_read_guest_virt() instead of open-coding it badly

In particular, we shouldn't assume that being contiguous in guest virtual
address space means being contiguous in guest *physical* address space.
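
As an illustration of the failure mode (a sketch only, not the kernel implementation): a structure that is contiguous in guest virtual addresses can straddle a page boundary, and the next virtual page may be backed by an unrelated guest-physical page. Translating only the base GVA once and then reading the full length from that single GPA, as the open-coded paths below did, reads the wrong physical memory in that case. In the sketch, gva_to_gpa() and read_gpa() are hypothetical stand-ins for the guest page-table walk and the guest-physical copy; kvm_read_guest_virt() performs the equivalent per-page re-translation.

/*
 * Sketch only, not kernel code: reading a guest-virtual buffer correctly
 * requires re-translating at every page boundary, because GVA-contiguous
 * pages need not be GPA-contiguous.  gva_to_gpa() and read_gpa() are
 * hypothetical helpers standing in for the MMU walk and the physical copy.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define GUEST_PAGE_SIZE 4096u

extern uint64_t gva_to_gpa(uint64_t gva);                /* hypothetical: 0 on fault */
extern bool read_gpa(uint64_t gpa, void *dst, size_t n); /* hypothetical */

static bool read_guest_virt_sketch(uint64_t gva, void *dst, size_t len)
{
        uint8_t *out = dst;

        while (len) {
                /* Bytes remaining in the current guest page. */
                size_t chunk = GUEST_PAGE_SIZE - (gva & (GUEST_PAGE_SIZE - 1));

                if (chunk > len)
                        chunk = len;

                /* Re-translate for each page instead of assuming contiguity. */
                uint64_t gpa = gva_to_gpa(gva);

                if (!gpa || !read_gpa(gpa, out, chunk))
                        return false;

                gva += chunk;
                out += chunk;
                len -= chunk;
        }
        return true;
}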

In dropping the manual calls to kvm_mmu_gva_to_gpa_system(), also drop
the srcu_read_lock() that was around them. All call sites are reached
from kvm_xen_hypercall(), which is called from the handle_exit function
with the read lock already held.
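
A rough sketch of that calling context, under the assumption stated above (handle_exit_sketch() is a hypothetical stand-in for the real VMX/SVM exit handlers; kernel headers and types are omitted):

/*
 * Rough sketch of the calling context described above.  The exit handlers
 * already hold the kvm->srcu read lock when the Xen hypercall path runs,
 * so the per-call srcu_read_lock()/srcu_read_unlock() pairs removed below
 * were nested, redundant acquisitions.
 */
static int handle_exit_sketch(struct kvm_vcpu *vcpu)
{
        int idx = srcu_read_lock(&vcpu->kvm->srcu); /* outer read-side section */

        kvm_xen_hypercall(vcpu);        /* reaches kvm_xen_schedop_poll() etc. */

        srcu_read_unlock(&vcpu->kvm->srcu, idx);
        return 0;
}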

Fixes: 2fd6df2f ("KVM: x86/xen: intercept EVTCHNOP_send from guests")
Fixes: 1a65105a ("KVM: x86/xen: handle PV spinlocks slowpath")
Fixes: 53639526 ("KVM: x86/xen: handle PV timers oneshot mode")
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Message-Id: <20221226120320.1125390-2-dwmw2@infradead.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 385407a6
@@ -1184,30 +1184,22 @@ static bool wait_pending_event(struct kvm_vcpu *vcpu, int nr_ports,
 static bool kvm_xen_schedop_poll(struct kvm_vcpu *vcpu, bool longmode,
                                  u64 param, u64 *r)
 {
-        int idx, i;
         struct sched_poll sched_poll;
         evtchn_port_t port, *ports;
-        gpa_t gpa;
+        struct x86_exception e;
+        int i;
 
         if (!lapic_in_kernel(vcpu) ||
             !(vcpu->kvm->arch.xen_hvm_config.flags & KVM_XEN_HVM_CONFIG_EVTCHN_SEND))
                 return false;
 
-        idx = srcu_read_lock(&vcpu->kvm->srcu);
-        gpa = kvm_mmu_gva_to_gpa_system(vcpu, param, NULL);
-        srcu_read_unlock(&vcpu->kvm->srcu, idx);
-        if (!gpa) {
-                *r = -EFAULT;
-                return true;
-        }
-
         if (IS_ENABLED(CONFIG_64BIT) && !longmode) {
                 struct compat_sched_poll sp32;
 
                 /* Sanity check that the compat struct definition is correct */
                 BUILD_BUG_ON(sizeof(sp32) != 16);
 
-                if (kvm_vcpu_read_guest(vcpu, gpa, &sp32, sizeof(sp32))) {
+                if (kvm_read_guest_virt(vcpu, param, &sp32, sizeof(sp32), &e)) {
                         *r = -EFAULT;
                         return true;
                 }
@@ -1221,8 +1213,8 @@ static bool kvm_xen_schedop_poll(struct kvm_vcpu *vcpu, bool longmode,
                 sched_poll.nr_ports = sp32.nr_ports;
                 sched_poll.timeout = sp32.timeout;
         } else {
-                if (kvm_vcpu_read_guest(vcpu, gpa, &sched_poll,
-                                        sizeof(sched_poll))) {
+                if (kvm_read_guest_virt(vcpu, param, &sched_poll,
+                                        sizeof(sched_poll), &e)) {
                         *r = -EFAULT;
                         return true;
                 }
@@ -1244,18 +1236,13 @@ static bool kvm_xen_schedop_poll(struct kvm_vcpu *vcpu, bool longmode,
         } else
                 ports = &port;
 
+        if (kvm_read_guest_virt(vcpu, (gva_t)sched_poll.ports, ports,
+                                sched_poll.nr_ports * sizeof(*ports), &e)) {
+                *r = -EFAULT;
+                return true;
+        }
+
         for (i = 0; i < sched_poll.nr_ports; i++) {
-                idx = srcu_read_lock(&vcpu->kvm->srcu);
-                gpa = kvm_mmu_gva_to_gpa_system(vcpu,
-                                                (gva_t)(sched_poll.ports + i),
-                                                NULL);
-                srcu_read_unlock(&vcpu->kvm->srcu, idx);
-
-                if (!gpa || kvm_vcpu_read_guest(vcpu, gpa,
-                                                &ports[i], sizeof(port))) {
-                        *r = -EFAULT;
-                        goto out;
-                }
                 if (ports[i] >= max_evtchn_port(vcpu->kvm)) {
                         *r = -EINVAL;
                         goto out;
@@ -1331,9 +1318,8 @@ static bool kvm_xen_hcall_vcpu_op(struct kvm_vcpu *vcpu, bool longmode, int cmd,
                                   int vcpu_id, u64 param, u64 *r)
 {
         struct vcpu_set_singleshot_timer oneshot;
+        struct x86_exception e;
         s64 delta;
-        gpa_t gpa;
-        int idx;
 
         if (!kvm_xen_timer_enabled(vcpu))
                 return false;
@@ -1344,9 +1330,6 @@ static bool kvm_xen_hcall_vcpu_op(struct kvm_vcpu *vcpu, bool longmode, int cmd,
                         *r = -EINVAL;
                         return true;
                 }
-                idx = srcu_read_lock(&vcpu->kvm->srcu);
-                gpa = kvm_mmu_gva_to_gpa_system(vcpu, param, NULL);
-                srcu_read_unlock(&vcpu->kvm->srcu, idx);
 
                 /*
                  * The only difference for 32-bit compat is the 4 bytes of
@@ -1364,9 +1347,8 @@ static bool kvm_xen_hcall_vcpu_op(struct kvm_vcpu *vcpu, bool longmode, int cmd,
                 BUILD_BUG_ON(sizeof_field(struct compat_vcpu_set_singleshot_timer, flags) !=
                              sizeof_field(struct vcpu_set_singleshot_timer, flags));
 
-                if (!gpa ||
-                    kvm_vcpu_read_guest(vcpu, gpa, &oneshot, longmode ? sizeof(oneshot) :
-                                        sizeof(struct compat_vcpu_set_singleshot_timer))) {
+                if (kvm_read_guest_virt(vcpu, param, &oneshot, longmode ? sizeof(oneshot) :
+                                        sizeof(struct compat_vcpu_set_singleshot_timer), &e)) {
                         *r = -EFAULT;
                         return true;
                 }
@@ -2003,14 +1985,12 @@ static bool kvm_xen_hcall_evtchn_send(struct kvm_vcpu *vcpu, u64 param, u64 *r)
 {
         struct evtchnfd *evtchnfd;
         struct evtchn_send send;
-        gpa_t gpa;
-        int idx;
+        struct x86_exception e;
 
-        idx = srcu_read_lock(&vcpu->kvm->srcu);
-        gpa = kvm_mmu_gva_to_gpa_system(vcpu, param, NULL);
-        srcu_read_unlock(&vcpu->kvm->srcu, idx);
+        /* Sanity check: this structure is the same for 32-bit and 64-bit */
+        BUILD_BUG_ON(sizeof(send) != 4);
 
-        if (!gpa || kvm_vcpu_read_guest(vcpu, gpa, &send, sizeof(send))) {
+        if (kvm_read_guest_virt(vcpu, param, &send, sizeof(send), &e)) {
                 *r = -EFAULT;
                 return true;
         }