Commit a83829f5 authored by Paolo Bonzini

KVM: x86: disable interrupts while pvclock_gtod_sync_lock is taken

pvclock_gtod_sync_lock can be taken with interrupts disabled if the
preempt notifier calls get_kvmclock_ns to update the Xen
runstate information:

   spin_lock include/linux/spinlock.h:354 [inline]
   get_kvmclock_ns+0x25/0x390 arch/x86/kvm/x86.c:2587
   kvm_xen_update_runstate+0x3d/0x2c0 arch/x86/kvm/xen.c:69
   kvm_xen_update_runstate_guest+0x74/0x320 arch/x86/kvm/xen.c:100
   kvm_xen_runstate_set_preempted arch/x86/kvm/xen.h:96 [inline]
   kvm_arch_vcpu_put+0x2d8/0x5a0 arch/x86/kvm/x86.c:4062

So change the users of the spinlock to spin_lock_irqsave and
spin_unlock_irqrestore.
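
As a minimal sketch of the pattern every call site is converted to (demo_gtod_lock, demo_clock_ns and demo_get_clock_ns are hypothetical stand-ins, not names from the patch):

   #include <linux/spinlock.h>
   #include <linux/types.h>

   static DEFINE_SPINLOCK(demo_gtod_lock);  /* stand-in for pvclock_gtod_sync_lock */
   static u64 demo_clock_ns;                /* data the lock protects */

   static u64 demo_get_clock_ns(void)
   {
           unsigned long flags;
           u64 ret;

           /*
            * Save the caller's IRQ state and disable interrupts while
            * the lock is held; irqrestore puts the state back exactly
            * as it was, so this is correct both from normal context
            * and from paths that already run with interrupts disabled,
            * such as the preempt notifier path above.
            */
           spin_lock_irqsave(&demo_gtod_lock, flags);
           ret = demo_clock_ns;
           spin_unlock_irqrestore(&demo_gtod_lock, flags);

           return ret;
   }

The irqsave/irqrestore variants are needed rather than spin_lock_irq()/spin_unlock_irq(): the latter would unconditionally re-enable interrupts on unlock, which would be wrong for callers that entered with interrupts already disabled.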

Reported-by: syzbot+b282b65c2c68492df769@syzkaller.appspotmail.com
Fixes: 30b5c851 ("KVM: x86/xen: Add support for vCPU runstate information")
Cc: David Woodhouse <dwmw@amazon.co.uk>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent c2c647f9
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2328,7 +2328,7 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data)
 	kvm_vcpu_write_tsc_offset(vcpu, offset);
 	raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
 
-	spin_lock(&kvm->arch.pvclock_gtod_sync_lock);
+	spin_lock_irqsave(&kvm->arch.pvclock_gtod_sync_lock, flags);
 	if (!matched) {
 		kvm->arch.nr_vcpus_matched_tsc = 0;
 	} else if (!already_matched) {
@@ -2336,7 +2336,7 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data)
 	}
 
 	kvm_track_tsc_matching(vcpu);
-	spin_unlock(&kvm->arch.pvclock_gtod_sync_lock);
+	spin_unlock_irqrestore(&kvm->arch.pvclock_gtod_sync_lock, flags);
 }
 
 static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
@@ -2558,15 +2558,16 @@ static void kvm_gen_update_masterclock(struct kvm *kvm)
 	int i;
 	struct kvm_vcpu *vcpu;
 	struct kvm_arch *ka = &kvm->arch;
+	unsigned long flags;
 
 	kvm_hv_invalidate_tsc_page(kvm);
 
 	kvm_make_mclock_inprogress_request(kvm);
 	/* no guest entries from this point */
-	spin_lock(&ka->pvclock_gtod_sync_lock);
+	spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
 	pvclock_update_vm_gtod_copy(kvm);
-	spin_unlock(&ka->pvclock_gtod_sync_lock);
+	spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
 
 	kvm_for_each_vcpu(i, vcpu, kvm)
 		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
 
@@ -2581,17 +2582,18 @@ u64 get_kvmclock_ns(struct kvm *kvm)
 {
 	struct kvm_arch *ka = &kvm->arch;
 	struct pvclock_vcpu_time_info hv_clock;
+	unsigned long flags;
 	u64 ret;
 
-	spin_lock(&ka->pvclock_gtod_sync_lock);
+	spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
 	if (!ka->use_master_clock) {
-		spin_unlock(&ka->pvclock_gtod_sync_lock);
+		spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
 		return get_kvmclock_base_ns() + ka->kvmclock_offset;
 	}
 
 	hv_clock.tsc_timestamp = ka->master_cycle_now;
 	hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset;
-	spin_unlock(&ka->pvclock_gtod_sync_lock);
+	spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
 
 	/* both __this_cpu_read() and rdtsc() should be on the same cpu */
 	get_cpu();
@@ -2685,13 +2687,13 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 	 * If the host uses TSC clock, then passthrough TSC as stable
 	 * to the guest.
	 */
-	spin_lock(&ka->pvclock_gtod_sync_lock);
+	spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
 	use_master_clock = ka->use_master_clock;
 	if (use_master_clock) {
 		host_tsc = ka->master_cycle_now;
 		kernel_ns = ka->master_kernel_ns;
 	}
-	spin_unlock(&ka->pvclock_gtod_sync_lock);
+	spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
 
 	/* Keep irq disabled to prevent changes to the clock */
 	local_irq_save(flags);
@@ -7723,6 +7725,7 @@ static void kvm_hyperv_tsc_notifier(void)
 	struct kvm *kvm;
 	struct kvm_vcpu *vcpu;
 	int cpu;
+	unsigned long flags;
 
 	mutex_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list)
@@ -7738,9 +7741,9 @@ static void kvm_hyperv_tsc_notifier(void)
 	list_for_each_entry(kvm, &vm_list, vm_list) {
 		struct kvm_arch *ka = &kvm->arch;
 
-		spin_lock(&ka->pvclock_gtod_sync_lock);
+		spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
 		pvclock_update_vm_gtod_copy(kvm);
-		spin_unlock(&ka->pvclock_gtod_sync_lock);
+		spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
 
 		kvm_for_each_vcpu(cpu, vcpu, kvm)
 			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);