Commit f81664f7 authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull kvm fixes from Paolo Bonzini:
 "x86 guest:

   - Tweaks to the paravirtualization code, to avoid using paravirt
     features when they're pointless or harmful

  x86 host:

   - Fix for SRCU lockdep splat

   - Brown paper bag fix for the propagation of errno"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: x86: pull kvm->srcu read-side to kvm_arch_vcpu_ioctl_run
  KVM: x86/mmu: Passing up the error state of mmu_alloc_shadow_roots()
  KVM: x86: Yield to IPI target vCPU only if it is busy
  x86/kvmclock: Fix Hyper-V Isolated VM's boot issue when vCPUs > 64
  x86/kvm: Don't waste memory if kvmclock is disabled
  x86/kvm: Don't use PV TLB/yield when mwait is advertised
parents 9bdeaca1 8d25b7be
@@ -463,6 +463,7 @@ static bool pv_tlb_flush_supported(void)
 	return (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
 		!kvm_para_has_hint(KVM_HINTS_REALTIME) &&
 		kvm_para_has_feature(KVM_FEATURE_STEAL_TIME) &&
+		!boot_cpu_has(X86_FEATURE_MWAIT) &&
 		(num_possible_cpus() != 1));
 }
@@ -477,6 +478,7 @@ static bool pv_sched_yield_supported(void)
 	return (kvm_para_has_feature(KVM_FEATURE_PV_SCHED_YIELD) &&
 		!kvm_para_has_hint(KVM_HINTS_REALTIME) &&
 		kvm_para_has_feature(KVM_FEATURE_STEAL_TIME) &&
+		!boot_cpu_has(X86_FEATURE_MWAIT) &&
 		(num_possible_cpus() != 1));
 }
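Context for the two hunks above ("Don't use PV TLB/yield when mwait is advertised"): exposing MWAIT to a guest implies a setup where vCPUs idle inside the guest without exiting, i.e. effectively dedicated pCPUs, so the PV TLB-flush and PV-yield paths only add overhead. These predicates gate whether the guest installs the corresponding paravirt hooks at boot; a minimal sketch of how they are typically consumed (simplified from the guest-init path in arch/x86/kernel/kvm.c; my_guest_init() is an illustrative name, not the real function):

	/* Sketch: with MWAIT advertised, both predicates now fail and the
	 * native TLB-flush and IPI paths are kept. */
	static void __init my_guest_init(void)
	{
		if (pv_tlb_flush_supported())
			pv_ops.mmu.flush_tlb_multi = kvm_flush_tlb_multi;

		if (pv_sched_yield_supported())
			smp_ops.send_call_func_ipi = kvm_smp_send_call_func_ipi;
	}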
@@ -622,7 +624,7 @@ static void kvm_smp_send_call_func_ipi(const struct cpumask *mask)
 	/* Make sure other vCPUs get a chance to run if they need to. */
 	for_each_cpu(cpu, mask) {
-		if (vcpu_is_preempted(cpu)) {
+		if (!idle_cpu(cpu) && vcpu_is_preempted(cpu)) {
 			kvm_hypercall1(KVM_HC_SCHED_YIELD, per_cpu(x86_cpu_to_apicid, cpu));
 			break;
 		}
...
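This is the "Yield to IPI target vCPU only if it is busy" fix: the SCHED_YIELD hypercall now fires only for targets that are both busy and preempted. A minimal sketch of the decision logic (should_yield_to() is a hypothetical helper for illustration, not kernel code):

	/* Donate our timeslice to the IPI target only when it can help:
	 *  - idle_cpu(cpu): an idle vCPU has nothing to finish; the IPI
	 *    itself wakes it, so a yield hypercall is wasted work.
	 *  - vcpu_is_preempted(cpu): only a vCPU that the host has
	 *    descheduled benefits from us giving up our pCPU. */
	static bool should_yield_to(int cpu)
	{
		return !idle_cpu(cpu) && vcpu_is_preempted(cpu);
	}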
@@ -239,6 +239,9 @@ static void __init kvmclock_init_mem(void)
 
 static int __init kvm_setup_vsyscall_timeinfo(void)
 {
+	if (!kvm_para_available() || !kvmclock)
+		return 0;
+
 	kvmclock_init_mem();
 
 #ifdef CONFIG_X86_64
...
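This guard covers both kvmclock shortlog entries: kvmclock_init_mem() sets up extra per-vCPU pvclock structures once the vCPU count exceeds the statically allocated boot array, work that is wasted when kvmclock is disabled and actively harmful when KVM paravirt is absent. A rough sketch of what is now skipped (illustrative only; the real allocation details in kvmclock_init_mem() may differ):

	/* Inside kvmclock_init_mem(), roughly: */
	if (num_possible_cpus() > HVC_BOOT_ARRAY_SIZE) {
		unsigned long ncpus = num_possible_cpus() - HVC_BOOT_ARRAY_SIZE;
		unsigned int order = get_order(ncpus *
				sizeof(struct pvclock_vsyscall_time_info));
		struct page *p = alloc_pages(GFP_KERNEL, order);

		hvclock_mem = page_address(p);
		/* On encrypted/isolated guests the pages are also marked
		 * shared (decrypted) -- the step that broke Hyper-V
		 * Isolated VMs with more than 64 vCPUs when this ran even
		 * though KVM paravirt was not available. */
	}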
@@ -3565,7 +3565,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 
 out_unlock:
 	write_unlock(&vcpu->kvm->mmu_lock);
-	return 0;
+	return r;
 }
 
 static int mmu_alloc_special_roots(struct kvm_vcpu *vcpu)
...
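This one-liner is the "brown paper bag" errno fix from the summary: an error already captured in r was discarded on the unlock path, so callers saw success. A rough sketch of the failing pattern (abridged from mmu_alloc_shadow_roots(); surrounding code omitted):

	r = make_mmu_pages_available(vcpu);	/* may fail, e.g. -ENOSPC */
	if (r < 0)
		goto out_unlock;
	/* ... build the shadow roots ... */
out_unlock:
	write_unlock(&vcpu->kvm->mmu_lock);
	return r;	/* was "return 0", silently masking the failure */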
@@ -9180,6 +9180,7 @@ static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
 		likely(!pic_in_kernel(vcpu->kvm));
 }
 
+/* Called within kvm->srcu read side. */
 static void post_kvm_run_save(struct kvm_vcpu *vcpu)
 {
 	struct kvm_run *kvm_run = vcpu->run;
@@ -9188,16 +9189,9 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu)
 	kvm_run->cr8 = kvm_get_cr8(vcpu);
 	kvm_run->apic_base = kvm_get_apic_base(vcpu);
 
-	/*
-	 * The call to kvm_ready_for_interrupt_injection() may end up in
-	 * kvm_xen_has_interrupt() which may require the srcu lock to be
-	 * held, to protect against changes in the vcpu_info address.
-	 */
-	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
 	kvm_run->ready_for_interrupt_injection =
 		pic_in_kernel(vcpu->kvm) ||
 		kvm_vcpu_ready_for_interrupt_injection(vcpu);
-	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
 
 	if (is_smm(vcpu))
 		kvm_run->flags |= KVM_RUN_X86_SMM;
@@ -9815,6 +9809,7 @@ void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu)
 EXPORT_SYMBOL_GPL(__kvm_request_immediate_exit);
 
 /*
+ * Called within kvm->srcu read side.
  * Returns 1 to let vcpu_run() continue the guest execution loop without
  * exiting to the userspace. Otherwise, the value will be returned to the
  * userspace.
@@ -10193,6 +10188,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	return r;
 }
 
+/* Called within kvm->srcu read side. */
 static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
 {
 	bool hv_timer;
@@ -10252,12 +10248,12 @@ static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
 		!vcpu->arch.apf.halted);
 }
 
+/* Called within kvm->srcu read side. */
 static int vcpu_run(struct kvm_vcpu *vcpu)
 {
 	int r;
 	struct kvm *kvm = vcpu->kvm;
 
-	vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
 	vcpu->arch.l1tf_flush_l1d = true;
 
 	for (;;) {
@@ -10285,14 +10281,12 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
 		if (__xfer_to_guest_mode_work_pending()) {
 			srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
 			r = xfer_to_guest_mode_handle_work(vcpu);
+			vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
 			if (r)
 				return r;
-			vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
 		}
 	}
 
-	srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
-
 	return r;
 }
@@ -10398,6 +10392,7 @@ static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 {
 	struct kvm_run *kvm_run = vcpu->run;
+	struct kvm *kvm = vcpu->kvm;
 	int r;
 
 	vcpu_load(vcpu);
@@ -10405,6 +10400,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 	kvm_run->flags = 0;
 	kvm_load_guest_fpu(vcpu);
 
+	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
 	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
 		if (kvm_run->immediate_exit) {
 			r = -EINTR;
@@ -10415,7 +10411,11 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 		 * use before KVM has ever run the vCPU.
 		 */
 		WARN_ON_ONCE(kvm_lapic_hv_timer_in_use(vcpu));
+
+		srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
 		kvm_vcpu_block(vcpu);
+		vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
+
 		if (kvm_apic_accept_events(vcpu) < 0) {
 			r = 0;
 			goto out;
@@ -10475,8 +10475,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 	if (kvm_run->kvm_valid_regs)
 		store_regs(vcpu);
 	post_kvm_run_save(vcpu);
-	kvm_sigset_deactivate(vcpu);
+	srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
 
+	kvm_sigset_deactivate(vcpu);
 	vcpu_put(vcpu);
 	return r;
 }
...
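Taken together, the x86.c hunks pull the kvm->srcu read side out of individual helpers and into the run ioctl itself, which is what fixes the SRCU lockdep splat. A simplified sketch of the resulting lock discipline (error paths and unrelated work omitted; not a verbatim quote of the final function):

	int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
	{
		struct kvm *kvm = vcpu->kvm;
		int r;

		vcpu_load(vcpu);
		/* Acquired once for the whole run ioctl. */
		vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);

		/* Callees are now documented as "called within kvm->srcu
		 * read side" and never take the lock themselves; blocking
		 * points drop and retake it around the sleep:
		 *	srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
		 *	kvm_vcpu_block(vcpu);
		 *	vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
		 */
		r = vcpu_run(vcpu);

		post_kvm_run_save(vcpu);	/* still inside the read side */
		srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);	/* released once */

		vcpu_put(vcpu);
		return r;
	}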