Commit 510958e9 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: Force PPC to define its own rcuwait object

Do not define/reference kvm_vcpu.wait if __KVM_HAVE_ARCH_WQP is true, and
instead force the architecture (PPC) to define its own rcuwait object.
Allowing common KVM to directly access vcpu->wait without a guard makes
it all too easy to introduce potential bugs, e.g. kvm_vcpu_block(),
kvm_vcpu_on_spin(), and async_pf_execute() all operate on vcpu->wait, not
the result of kvm_arch_vcpu_get_wait(), and so may do the wrong thing for
PPC.

Due to PPC's shenanigans with respect to callbacks and waits (it switches
to the virtual core's wait object at KVM_RUN!?!?), it's not clear whether
or not this fixes any bugs.
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20211009021236.4122790-5-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 6f390916
...@@ -749,6 +749,7 @@ struct kvm_vcpu_arch { ...@@ -749,6 +749,7 @@ struct kvm_vcpu_arch {
u8 irq_pending; /* Used by XIVE to signal pending guest irqs */ u8 irq_pending; /* Used by XIVE to signal pending guest irqs */
u32 last_inst; u32 last_inst;
struct rcuwait wait;
struct rcuwait *waitp; struct rcuwait *waitp;
struct kvmppc_vcore *vcore; struct kvmppc_vcore *vcore;
int ret; int ret;
......
...@@ -753,7 +753,8 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) ...@@ -753,7 +753,8 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
if (err) if (err)
goto out_vcpu_uninit; goto out_vcpu_uninit;
vcpu->arch.waitp = &vcpu->wait; rcuwait_init(&vcpu->arch.wait);
vcpu->arch.waitp = &vcpu->arch.wait;
kvmppc_create_vcpu_debugfs(vcpu, vcpu->vcpu_id); kvmppc_create_vcpu_debugfs(vcpu, vcpu->vcpu_id);
return 0; return 0;
......
...@@ -314,7 +314,9 @@ struct kvm_vcpu { ...@@ -314,7 +314,9 @@ struct kvm_vcpu {
struct mutex mutex; struct mutex mutex;
struct kvm_run *run; struct kvm_run *run;
#ifndef __KVM_HAVE_ARCH_WQP
struct rcuwait wait; struct rcuwait wait;
#endif
struct pid __rcu *pid; struct pid __rcu *pid;
int sigset_active; int sigset_active;
sigset_t sigset; sigset_t sigset;
......
...@@ -85,7 +85,7 @@ static void async_pf_execute(struct work_struct *work) ...@@ -85,7 +85,7 @@ static void async_pf_execute(struct work_struct *work)
trace_kvm_async_pf_completed(addr, cr2_or_gpa); trace_kvm_async_pf_completed(addr, cr2_or_gpa);
rcuwait_wake_up(&vcpu->wait); rcuwait_wake_up(kvm_arch_vcpu_get_wait(vcpu));
mmput(mm); mmput(mm);
kvm_put_kvm(vcpu->kvm); kvm_put_kvm(vcpu->kvm);
......
...@@ -422,7 +422,9 @@ static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id) ...@@ -422,7 +422,9 @@ static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
vcpu->kvm = kvm; vcpu->kvm = kvm;
vcpu->vcpu_id = id; vcpu->vcpu_id = id;
vcpu->pid = NULL; vcpu->pid = NULL;
#ifndef __KVM_HAVE_ARCH_WQP
rcuwait_init(&vcpu->wait); rcuwait_init(&vcpu->wait);
#endif
kvm_async_pf_vcpu_init(vcpu); kvm_async_pf_vcpu_init(vcpu);
vcpu->pre_pcpu = -1; vcpu->pre_pcpu = -1;
...@@ -3284,6 +3286,7 @@ update_halt_poll_stats(struct kvm_vcpu *vcpu, u64 poll_ns, bool waited) ...@@ -3284,6 +3286,7 @@ update_halt_poll_stats(struct kvm_vcpu *vcpu, u64 poll_ns, bool waited)
*/ */
void kvm_vcpu_block(struct kvm_vcpu *vcpu) void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{ {
struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);
bool halt_poll_allowed = !kvm_arch_no_poll(vcpu); bool halt_poll_allowed = !kvm_arch_no_poll(vcpu);
ktime_t start, cur, poll_end; ktime_t start, cur, poll_end;
bool waited = false; bool waited = false;
...@@ -3322,7 +3325,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu) ...@@ -3322,7 +3325,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
} }
prepare_to_rcuwait(&vcpu->wait); prepare_to_rcuwait(wait);
for (;;) { for (;;) {
set_current_state(TASK_INTERRUPTIBLE); set_current_state(TASK_INTERRUPTIBLE);
...@@ -3332,7 +3335,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu) ...@@ -3332,7 +3335,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
waited = true; waited = true;
schedule(); schedule();
} }
finish_rcuwait(&vcpu->wait); finish_rcuwait(wait);
cur = ktime_get(); cur = ktime_get();
if (waited) { if (waited) {
vcpu->stat.generic.halt_wait_ns += vcpu->stat.generic.halt_wait_ns +=
...@@ -3544,7 +3547,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode) ...@@ -3544,7 +3547,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
continue; continue;
if (vcpu == me) if (vcpu == me)
continue; continue;
if (rcuwait_active(&vcpu->wait) && if (rcuwait_active(kvm_arch_vcpu_get_wait(vcpu)) &&
!vcpu_dy_runnable(vcpu)) !vcpu_dy_runnable(vcpu))
continue; continue;
if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode && if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode &&
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment