Commit 614f6970 authored by Paolo Bonzini

KVM: x86/mmu: do not allow readers to acquire references to invalid roots

Remove the "shared" argument of for_each_tdp_mmu_root_yield_safe, thus ensuring
that readers do not ever acquire a reference to an invalid root.  After this
patch, all readers except kvm_tdp_mmu_zap_invalidated_roots() treat
refcount=0/valid, refcount=0/invalid and refcount=1/invalid in exactly the
same way.  kvm_tdp_mmu_zap_invalidated_roots() is different but it also
does not acquire a reference to the invalid root, and it cannot see
refcount=0/invalid because it is guaranteed to run after
kvm_tdp_mmu_invalidate_all_roots().
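
To make the three states concrete: a reader that wants to use a root only succeeds when the root is both still valid and still referenced, so from the reader's point of view all three states above mean the same thing, "skip this root".  A minimal sketch of that check follows (the helper name is hypothetical, not an in-tree function; the fields follow struct kvm_mmu_page in arch/x86/kvm/mmu/mmu_internal.h):

#include <linux/refcount.h>
#include "mmu_internal.h"

/*
 * Hypothetical illustration, not the in-tree code: a reader never takes a
 * reference to an invalid root, and refcount_inc_not_zero() refuses to
 * resurrect a root whose refcount already hit zero, so refcount=0/valid,
 * refcount=0/invalid and refcount=1/invalid are all skipped in exactly
 * the same way.
 */
static bool tdp_mmu_reader_try_get_root(struct kvm_mmu_page *root)
{
	if (root->role.invalid)
		return false;

	return refcount_inc_not_zero(&root->tdp_mmu_root_count);
}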

Opportunistically add a lockdep assertion to the yield-safe iterator.
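
For context, the assertion helper referenced by the new iterator condition looks roughly like the sketch below (based on the TDP MMU code; exact attributes and placement may differ in tree).  It returns true so it can be chained into the macro's if () condition without filtering out any roots:

#include <linux/kvm_host.h>
#include <linux/lockdep.h>

static __always_inline bool kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm,
							     bool shared)
{
	/* Readers hold mmu_lock for read, writers hold it for write. */
	if (shared)
		lockdep_assert_held_read(&kvm->mmu_lock);
	else
		lockdep_assert_held_write(&kvm->mmu_lock);

	/* Always true, so the iterator's behaviour is unchanged. */
	return true;
}
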
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 7c554d8e
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -166,14 +166,15 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
 	for (_root = tdp_mmu_next_root(_kvm, NULL, _shared, _only_valid);	\
 	     _root;								\
 	     _root = tdp_mmu_next_root(_kvm, _root, _shared, _only_valid))	\
-		if (kvm_mmu_page_as_id(_root) != _as_id) {			\
+		if (kvm_lockdep_assert_mmu_lock_held(_kvm, _shared) &&		\
+		    kvm_mmu_page_as_id(_root) != _as_id) {			\
 		} else
 
 #define for_each_valid_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared)	\
 	__for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, true)
 
-#define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared)		\
-	__for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, false)
+#define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id)			\
+	__for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, false, false)
 
 /*
  * Iterate over all TDP MMU roots.  Requires that mmu_lock be held for write,
@@ -808,7 +809,7 @@ bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
 {
 	struct kvm_mmu_page *root;
 
-	for_each_tdp_mmu_root_yield_safe(kvm, root, as_id, false)
+	for_each_tdp_mmu_root_yield_safe(kvm, root, as_id)
 		flush = zap_gfn_range(kvm, root, start, end, can_yield, flush,
 				      false);