Commit 3a0e7731, authored by Roman Kagan, committed by Paolo Bonzini

x86: kvm: hyperv: simplify SynIC message delivery

SynIC message delivery is somewhat overengineered: it pretends to follow
the ordering rules when grabbing the message slot, using atomic
operations and all that, but does it incorrectly and unnecessarily.

The correct order would be to first set .msg_pending, then atomically
replace .message_type if it was zero, and then clear .msg_pending if
the previous step was successful.  But this all is done in vcpu context
so the whole update looks atomic to the guest (it's assumed to only
access the message page from this cpu), and therefore can be done in
whatever order is most convenient (and is also the reason why the
incorrect order didn't trigger any bugs so far).

While at this, also switch to kvm_vcpu_{read,write}_guest_page, and drop
the no longer needed synic_clear_sint_msg_pending.
Signed-off-by: Roman Kagan <rkagan@virtuozzo.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent eb1ff0a9
...@@ -158,32 +158,6 @@ static struct kvm_vcpu_hv_synic *synic_get(struct kvm *kvm, u32 vpidx) ...@@ -158,32 +158,6 @@ static struct kvm_vcpu_hv_synic *synic_get(struct kvm *kvm, u32 vpidx)
return (synic->active) ? synic : NULL; return (synic->active) ? synic : NULL;
} }
/*
 * Clear the msg_pending flag in the SynIC message slot for @sint.
 *
 * Maps the guest's SynIC message page, zeroes the header's
 * message_flags.msg_pending bit for the given SINT slot, and marks the
 * page dirty so the change is migrated.  Per the commit message this
 * runs in vcpu context, and the caller has already checked that the
 * message page is enabled (HV_SYNIC_SIMP_ENABLE).
 *
 * NOTE(review): removed by this commit — the pending-flag handling
 * moved into synic_deliver_msg, which now uses
 * kvm_vcpu_{read,write}_guest_page instead of mapping the page.
 */
static void synic_clear_sint_msg_pending(struct kvm_vcpu_hv_synic *synic,
u32 sint)
{
struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
struct page *page;
gpa_t gpa;
struct hv_message *msg;
struct hv_message_page *msg_page;
/* Low bits of synic->msg_page carry enable flags; mask to get the GPA. */
gpa = synic->msg_page & PAGE_MASK;
page = kvm_vcpu_gfn_to_page(vcpu, gpa >> PAGE_SHIFT);
if (is_error_page(page)) {
vcpu_err(vcpu, "Hyper-V SynIC can't get msg page, gpa 0x%llx\n",
gpa);
return;
}
/* Atomic kernel mapping: no sleeping allowed until kunmap_atomic(). */
msg_page = kmap_atomic(page);
msg = &msg_page->sint_message[sint];
msg->header.message_flags.msg_pending = 0;
kunmap_atomic(msg_page);
/* Release the page reference and tell KVM the gfn was modified. */
kvm_release_page_dirty(page);
kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
}
static void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint) static void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint)
{ {
struct kvm *kvm = vcpu->kvm; struct kvm *kvm = vcpu->kvm;
...@@ -194,9 +168,6 @@ static void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint) ...@@ -194,9 +168,6 @@ static void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint)
trace_kvm_hv_notify_acked_sint(vcpu->vcpu_id, sint); trace_kvm_hv_notify_acked_sint(vcpu->vcpu_id, sint);
if (synic->msg_page & HV_SYNIC_SIMP_ENABLE)
synic_clear_sint_msg_pending(synic, sint);
/* Try to deliver pending Hyper-V SynIC timers messages */ /* Try to deliver pending Hyper-V SynIC timers messages */
stimers_pending = 0; stimers_pending = 0;
for (idx = 0; idx < ARRAY_SIZE(hv_vcpu->stimer); idx++) { for (idx = 0; idx < ARRAY_SIZE(hv_vcpu->stimer); idx++) {
...@@ -589,41 +560,54 @@ static int synic_deliver_msg(struct kvm_vcpu_hv_synic *synic, u32 sint, ...@@ -589,41 +560,54 @@ static int synic_deliver_msg(struct kvm_vcpu_hv_synic *synic, u32 sint,
struct hv_message *src_msg) struct hv_message *src_msg)
{ {
struct kvm_vcpu *vcpu = synic_to_vcpu(synic); struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
struct page *page; int msg_off = offsetof(struct hv_message_page, sint_message[sint]);
gpa_t gpa; gfn_t msg_page_gfn;
struct hv_message *dst_msg; struct hv_message_header hv_hdr;
int r; int r;
struct hv_message_page *msg_page;
if (!(synic->msg_page & HV_SYNIC_SIMP_ENABLE)) if (!(synic->msg_page & HV_SYNIC_SIMP_ENABLE))
return -ENOENT; return -ENOENT;
gpa = synic->msg_page & PAGE_MASK; msg_page_gfn = synic->msg_page >> PAGE_SHIFT;
page = kvm_vcpu_gfn_to_page(vcpu, gpa >> PAGE_SHIFT);
if (is_error_page(page))
return -EFAULT;
msg_page = kmap_atomic(page); /*
dst_msg = &msg_page->sint_message[sint]; * Strictly following the spec-mandated ordering would assume setting
if (sync_cmpxchg(&dst_msg->header.message_type, HVMSG_NONE, * .msg_pending before checking .message_type. However, this function
src_msg->header.message_type) != HVMSG_NONE) { * is only called in vcpu context so the entire update is atomic from
dst_msg->header.message_flags.msg_pending = 1; * guest POV and thus the exact order here doesn't matter.
r = -EAGAIN; */
} else { r = kvm_vcpu_read_guest_page(vcpu, msg_page_gfn, &hv_hdr.message_type,
memcpy(&dst_msg->u.payload, &src_msg->u.payload, msg_off + offsetof(struct hv_message,
src_msg->header.payload_size); header.message_type),
dst_msg->header.message_type = src_msg->header.message_type; sizeof(hv_hdr.message_type));
dst_msg->header.payload_size = src_msg->header.payload_size; if (r < 0)
r = synic_set_irq(synic, sint); return r;
if (r >= 1)
r = 0; if (hv_hdr.message_type != HVMSG_NONE) {
else if (r == 0) hv_hdr.message_flags.msg_pending = 1;
r = -EFAULT; r = kvm_vcpu_write_guest_page(vcpu, msg_page_gfn,
&hv_hdr.message_flags,
msg_off +
offsetof(struct hv_message,
header.message_flags),
sizeof(hv_hdr.message_flags));
if (r < 0)
return r;
return -EAGAIN;
} }
kunmap_atomic(msg_page);
kvm_release_page_dirty(page); r = kvm_vcpu_write_guest_page(vcpu, msg_page_gfn, src_msg, msg_off,
kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT); sizeof(src_msg->header) +
return r; src_msg->header.payload_size);
if (r < 0)
return r;
r = synic_set_irq(synic, sint);
if (r < 0)
return r;
if (r == 0)
return -EFAULT;
return 0;
} }
static int stimer_send_msg(struct kvm_vcpu_hv_stimer *stimer) static int stimer_send_msg(struct kvm_vcpu_hv_stimer *stimer)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment