Commit 9c1a0744 authored by Wanpeng Li, committed by Paolo Bonzini

KVM: x86/xen: Take srcu lock when accessing kvm_memslots()

kvm_memslots() will be called by kvm_write_guest_offset_cached(), so we should
take the srcu lock. Let's pull the srcu lock operation out of
kvm_steal_time_set_preempted() and up into kvm_arch_vcpu_put() so that the Xen
runstate path is covered as well.

Fixes: 30b5c851 ("KVM: x86/xen: Add support for vCPU runstate information")
Signed-off-by: Wanpeng Li <wanpengli@tencent.com>
Message-Id: <1619166200-9215-1-git-send-email-wanpengli@tencent.com>
Reviewed-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent bf05bf16
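
For background: KVM's memslot array is protected by SRCU, so any path that
reaches kvm_memslots() — here, kvm_write_guest_offset_cached() called from
kvm_xen_runstate_set_preempted() — must run inside a reader-side critical
section on kvm->srcu. Before this patch only kvm_steal_time_set_preempted()
took the lock, leaving the Xen runstate path unprotected. A minimal sketch of
the reader-side idiom the patch hoists into kvm_arch_vcpu_put(), where
access_memslots() is a hypothetical stand-in for any memslot-touching code:

	/*
	 * Minimal sketch of the SRCU reader-side idiom this patch applies.
	 * access_memslots() is a hypothetical stand-in for any code that
	 * reaches kvm_memslots(), e.g. kvm_write_guest_offset_cached().
	 */
	static void example_reader(struct kvm *kvm)
	{
		int idx;

		/* Enter an SRCU read-side critical section on kvm->srcu. */
		idx = srcu_read_lock(&kvm->srcu);
		access_memslots(kvm);
		/* Exit, passing back the index srcu_read_lock() returned. */
		srcu_read_unlock(&kvm->srcu, idx);
	}
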
@@ -4025,7 +4025,6 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
 {
 	struct kvm_host_map map;
 	struct kvm_steal_time *st;
-	int idx;
 
 	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
 		return;
@@ -4033,15 +4032,9 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
 	if (vcpu->arch.st.preempted)
 		return;
 
-	/*
-	 * Take the srcu lock as memslots will be accessed to check the gfn
-	 * cache generation against the memslots generation.
-	 */
-	idx = srcu_read_lock(&vcpu->kvm->srcu);
-
 	if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT, &map,
 			&vcpu->arch.st.cache, true))
-		goto out;
+		return;
 
 	st = map.hva +
 		offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS);
@@ -4049,20 +4042,25 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
 	st->preempted = vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED;
 
 	kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, true);
-
-out:
-	srcu_read_unlock(&vcpu->kvm->srcu, idx);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
+	int idx;
+
 	if (vcpu->preempted && !vcpu->arch.guest_state_protected)
 		vcpu->arch.preempted_in_kernel = !static_call(kvm_x86_get_cpl)(vcpu);
 
+	/*
+	 * Take the srcu lock as memslots will be accessed to check the gfn
+	 * cache generation against the memslots generation.
+	 */
+	idx = srcu_read_lock(&vcpu->kvm->srcu);
	if (kvm_xen_msr_enabled(vcpu->kvm))
 		kvm_xen_runstate_set_preempted(vcpu);
 	else
 		kvm_steal_time_set_preempted(vcpu);
+	srcu_read_unlock(&vcpu->kvm->srcu, idx);
 
 	static_call(kvm_x86_vcpu_put)(vcpu);
 	vcpu->arch.last_host_tsc = rdtsc();
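
Why the missing lock is more than theoretical: kvm_memslots() dereferences an
SRCU-protected pointer, so with CONFIG_PROVE_RCU an access outside the
read-side critical section triggers a suspicious-RCU-usage splat. A rough,
simplified sketch of the accessor's shape (details vary across kernel
versions; the real accessor also handles multiple address spaces):

	static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
	{
		/* Legal only under kvm->srcu or with kvm->slots_lock held. */
		return srcu_dereference_check(kvm->memslots, &kvm->srcu,
					      lockdep_is_held(&kvm->slots_lock));
	}
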