Commit e6b8fdbe authored by Paul Mackerras, committed by Stefan Bader

KVM: PPC: Book3S: Use new mutex to synchronize access to rtas token list

BugLink: https://bugs.launchpad.net/bugs/1836666

[ Upstream commit 1659e27d ]

Currently the Book 3S KVM code uses kvm->lock to synchronize access
to the kvm->arch.rtas_tokens list.  Because this list is scanned
inside kvmppc_rtas_hcall(), which is called with the vcpu mutex held,
taking kvm->lock causes a lock inversion problem, which could lead to
a deadlock.
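
As a rough sketch of the inversion (illustrative only; the call chains are
simplified, and the usual KVM convention is that kvm->lock is taken outside
the vcpu mutexes):

  CPU 0 (vcpu ioctl -> kvmppc_rtas_hcall)     CPU 1 (path taking kvm->lock first)
  mutex_lock(&vcpu->mutex);
                                              mutex_lock(&kvm->lock);
  kvmppc_rtas_hcall(vcpu);
    mutex_lock(&kvm->lock);     /* blocks */
                                              mutex_lock(&vcpu->mutex); /* blocks */

Each CPU then waits for the mutex the other already holds (an ABBA deadlock).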

To fix this, we add a new mutex, kvm->arch.rtas_token_lock, which nests
inside the vcpu mutexes, and use that instead of kvm->lock when
accessing the rtas token list.
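
Sketched against the hunks below, the resulting ordering is consistent
(vcpu->mutex, where held, is always taken before the new mutex):

  /* RTAS hcall path (vcpu->mutex already held by the vcpu ioctl) */
  mutex_lock(&vcpu->kvm->arch.rtas_token_lock);
  /* look up the token in kvm->arch.rtas_tokens */
  mutex_unlock(&vcpu->kvm->arch.rtas_token_lock);

  /* KVM_PPC_RTAS_DEFINE_TOKEN ioctl path */
  mutex_lock(&kvm->arch.rtas_token_lock);
  /* define or undefine a token in kvm->arch.rtas_tokens */
  mutex_unlock(&kvm->arch.rtas_token_lock);

Neither path takes kvm->lock for the token list any more, so the only nesting
involving the new mutex is vcpu->mutex -> rtas_token_lock.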

This removes the lockdep_assert_held() in kvmppc_rtas_tokens_free().
At this point we don't hold the new mutex, but that is OK because
kvmppc_rtas_tokens_free() is only called when the whole VM is being
destroyed, and at that point nothing can be looking up a token in
the list.
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Signed-off-by: Khalid Elmously <khalid.elmously@canonical.com>
Signed-off-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
parent 67a518f4
@@ -250,6 +250,7 @@ struct kvm_arch {
 #ifdef CONFIG_PPC_BOOK3S_64
 	struct list_head spapr_tce_tables;
 	struct list_head rtas_tokens;
+	struct mutex rtas_token_lock;
 	DECLARE_BITMAP(enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);
 #endif
 #ifdef CONFIG_KVM_MPIC

@@ -803,6 +803,7 @@ int kvmppc_core_init_vm(struct kvm *kvm)
 #ifdef CONFIG_PPC64
 	INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
 	INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
+	mutex_init(&kvm->arch.rtas_token_lock);
 #endif

 	return kvm->arch.kvm_ops->init_vm(kvm);

@@ -133,7 +133,7 @@ static int rtas_token_undefine(struct kvm *kvm, char *name)
 {
 	struct rtas_token_definition *d, *tmp;

-	lockdep_assert_held(&kvm->lock);
+	lockdep_assert_held(&kvm->arch.rtas_token_lock);

 	list_for_each_entry_safe(d, tmp, &kvm->arch.rtas_tokens, list) {
 		if (rtas_name_matches(d->handler->name, name)) {

@@ -154,7 +154,7 @@ static int rtas_token_define(struct kvm *kvm, char *name, u64 token)
 	bool found;
 	int i;

-	lockdep_assert_held(&kvm->lock);
+	lockdep_assert_held(&kvm->arch.rtas_token_lock);

 	list_for_each_entry(d, &kvm->arch.rtas_tokens, list) {
 		if (d->token == token)

@@ -193,14 +193,14 @@ int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp)
 	if (copy_from_user(&args, argp, sizeof(args)))
 		return -EFAULT;

-	mutex_lock(&kvm->lock);
+	mutex_lock(&kvm->arch.rtas_token_lock);

 	if (args.token)
 		rc = rtas_token_define(kvm, args.name, args.token);
 	else
 		rc = rtas_token_undefine(kvm, args.name);

-	mutex_unlock(&kvm->lock);
+	mutex_unlock(&kvm->arch.rtas_token_lock);

 	return rc;
 }

@@ -232,7 +232,7 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
 	orig_rets = args.rets;
 	args.rets = &args.args[be32_to_cpu(args.nargs)];

-	mutex_lock(&vcpu->kvm->lock);
+	mutex_lock(&vcpu->kvm->arch.rtas_token_lock);

 	rc = -ENOENT;
 	list_for_each_entry(d, &vcpu->kvm->arch.rtas_tokens, list) {

@@ -243,7 +243,7 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
 		}
 	}

-	mutex_unlock(&vcpu->kvm->lock);
+	mutex_unlock(&vcpu->kvm->arch.rtas_token_lock);

 	if (rc == 0) {
 		args.rets = orig_rets;

@@ -269,8 +269,6 @@ void kvmppc_rtas_tokens_free(struct kvm *kvm)
 {
 	struct rtas_token_definition *d, *tmp;

-	lockdep_assert_held(&kvm->lock);
-
 	list_for_each_entry_safe(d, tmp, &kvm->arch.rtas_tokens, list) {
 		list_del(&d->list);
 		kfree(d);