Commit 15b51dc0 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86: Take KVM's SRCU lock only if steal time update is needed

Enter a SRCU critical section for a memslots lookup during steal time
update if and only if a steal time update is actually needed.  Taking
the lock can be avoided if steal time is disabled by the guest, or if
KVM knows it has already flagged the vCPU as being preempted.

Reword the comment to be more precise as to exactly why memslots will
be queried.
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210123000334.3123628-3-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 19979fba
@@ -4014,6 +4014,7 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
 {
 	struct kvm_host_map map;
 	struct kvm_steal_time *st;
+	int idx;
 
 	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
 		return;
@@ -4021,9 +4022,15 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
 	if (vcpu->arch.st.preempted)
 		return;
 
+	/*
+	 * Take the srcu lock as memslots will be accessed to check the gfn
+	 * cache generation against the memslots generation.
+	 */
+	idx = srcu_read_lock(&vcpu->kvm->srcu);
+
 	if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT, &map,
 			&vcpu->arch.st.cache, true))
-		return;
+		goto out;
 
 	st = map.hva +
 		offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS);
@@ -4031,22 +4038,17 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
 	st->preempted = vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED;
 
 	kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, true);
+
+out:
+	srcu_read_unlock(&vcpu->kvm->srcu, idx);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
-	int idx;
-
 	if (vcpu->preempted && !vcpu->arch.guest_state_protected)
 		vcpu->arch.preempted_in_kernel = !kvm_x86_ops.get_cpl(vcpu);
 
-	/*
-	 * kvm_memslots() will be called by
-	 * kvm_write_guest_offset_cached() so take the srcu lock.
-	 */
-	idx = srcu_read_lock(&vcpu->kvm->srcu);
 	kvm_steal_time_set_preempted(vcpu);
-	srcu_read_unlock(&vcpu->kvm->srcu, idx);
 	kvm_x86_ops.vcpu_put(vcpu);
 	vcpu->arch.last_host_tsc = rdtsc();
 	/*
...
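
For readers unfamiliar with the pattern the patch applies, the sketch below is a minimal userspace analogue, not kernel code: every name in it is hypothetical, and a pthread rwlock stands in for KVM's SRCU. It has the same shape as the patched kvm_steal_time_set_preempted(): the cheap early-out checks run before the lock is taken, and a single unlock path is reached via goto.

/* Minimal userspace analogue of the patched locking pattern; all names
 * are hypothetical and pthread_rwlock_t stands in for KVM's SRCU. */
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

static pthread_rwlock_t slots_lock = PTHREAD_RWLOCK_INITIALIZER;

struct vcpu {
	bool steal_time_enabled;	/* analogue of the KVM_MSR_ENABLED check */
	bool preempted;			/* analogue of vcpu->arch.st.preempted */
};

/* Stand-in for kvm_map_gfn(); pretend the mapping can fail. */
static bool map_steal_time_page(struct vcpu *v)
{
	return v != NULL;
}

static void steal_time_set_preempted(struct vcpu *v)
{
	/* Cheap early outs: no lock is taken on these paths. */
	if (!v->steal_time_enabled)
		return;
	if (v->preempted)
		return;

	/* Only now is the read-side lock actually needed. */
	pthread_rwlock_rdlock(&slots_lock);

	if (!map_steal_time_page(v))
		goto out;

	v->preempted = true;
out:
	pthread_rwlock_unlock(&slots_lock);
}

int main(void)
{
	struct vcpu v = { .steal_time_enabled = true, .preempted = false };

	steal_time_set_preempted(&v);
	return v.preempted ? 0 : 1;
}

The design point mirrors the commit message: when steal time is disabled by the guest, or the vCPU is already flagged as preempted, the function returns without ever touching the lock, which is the common case on repeated preemptions.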