Commit c5c8c7c5 authored by David Matlack, committed by Paolo Bonzini

KVM: x86/mmu: Make walk_shadow_page_lockless_{begin,end} interoperate with the TDP MMU

Acquire the RCU read lock in walk_shadow_page_lockless_begin and release
it in walk_shadow_page_lockless_end when the TDP MMU is enabled.  This
should not introduce any functional changes but is used in the following
commit to make fast_page_fault interoperate with the TDP MMU.
Signed-off-by: David Matlack <dmatlack@google.com>
Message-Id: <20210713220957.3493520-4-dmatlack@google.com>
[Use if...else instead of if(){return;}]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 61bcd360
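
To illustrate the pattern this enables (a sketch, not part of the patch): a lockless walk of the shadow page tables is bracketed by walk_shadow_page_lockless_{begin,end}, which with this change map to rcu_read_lock()/rcu_read_unlock() when the TDP MMU is in use, and to the existing IRQ-disable plus vcpu->mode scheme otherwise. The helper below is hypothetical; get_mmio_spte() in the diff is the real caller updated by this commit.

	/* Hypothetical caller, for illustration only -- not part of this commit. */
	static u64 example_read_leaf_spte(struct kvm_vcpu *vcpu, u64 addr)
	{
		u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
		int root_level, leaf;

		/* Protects the walk: RCU for the TDP MMU, IRQs-off otherwise. */
		walk_shadow_page_lockless_begin(vcpu);
		leaf = get_walk(vcpu, addr, sptes, &root_level);
		walk_shadow_page_lockless_end(vcpu);

		/* leaf < 0 means no SPTE was read; sptes[leaf] may be non-present. */
		return (leaf < 0) ? 0ull : sptes[leaf];
	}
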
arch/x86/kvm/mmu/mmu.c

@@ -686,28 +686,36 @@ static bool mmu_spte_age(u64 *sptep)
 static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu)
 {
-	/*
-	 * Prevent page table teardown by making any free-er wait during
-	 * kvm_flush_remote_tlbs() IPI to all active vcpus.
-	 */
-	local_irq_disable();
+	if (is_tdp_mmu(vcpu->arch.mmu)) {
+		kvm_tdp_mmu_walk_lockless_begin();
+	} else {
+		/*
+		 * Prevent page table teardown by making any free-er wait during
+		 * kvm_flush_remote_tlbs() IPI to all active vcpus.
+		 */
+		local_irq_disable();
 
-	/*
-	 * Make sure a following spte read is not reordered ahead of the write
-	 * to vcpu->mode.
-	 */
-	smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);
+		/*
+		 * Make sure a following spte read is not reordered ahead of the write
+		 * to vcpu->mode.
+		 */
+		smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);
+	}
 }
 
 static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
 {
-	/*
-	 * Make sure the write to vcpu->mode is not reordered in front of
-	 * reads to sptes.  If it does, kvm_mmu_commit_zap_page() can see us
-	 * OUTSIDE_GUEST_MODE and proceed to free the shadow page table.
-	 */
-	smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);
-	local_irq_enable();
+	if (is_tdp_mmu(vcpu->arch.mmu)) {
+		kvm_tdp_mmu_walk_lockless_end();
+	} else {
+		/*
+		 * Make sure the write to vcpu->mode is not reordered in front of
+		 * reads to sptes.  If it does, kvm_mmu_commit_zap_page() can see us
+		 * OUTSIDE_GUEST_MODE and proceed to free the shadow page table.
+		 */
+		smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);
+		local_irq_enable();
+	}
 }
 
 static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)

@@ -3617,6 +3625,8 @@ static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct)
 /*
  * Return the level of the lowest level SPTE added to sptes.
  * That SPTE may be non-present.
+ *
+ * Must be called between walk_shadow_page_lockless_{begin,end}.
  */
 static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level)
 {

@@ -3624,8 +3634,6 @@ static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level
 	int leaf = -1;
 	u64 spte;
 
-	walk_shadow_page_lockless_begin(vcpu);
-
 	for (shadow_walk_init(&iterator, vcpu, addr),
 	     *root_level = iterator.level;
 	     shadow_walk_okay(&iterator);

@@ -3639,8 +3647,6 @@ static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level
 			break;
 	}
 
-	walk_shadow_page_lockless_end(vcpu);
-
 	return leaf;
 }

@@ -3652,11 +3658,15 @@ static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
 	int root, leaf, level;
 	bool reserved = false;
 
+	walk_shadow_page_lockless_begin(vcpu);
+
 	if (is_tdp_mmu(vcpu->arch.mmu))
 		leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root);
 	else
 		leaf = get_walk(vcpu, addr, sptes, &root);
 
+	walk_shadow_page_lockless_end(vcpu);
+
 	if (unlikely(leaf < 0)) {
 		*sptep = 0ull;
 		return reserved;
arch/x86/kvm/mmu/tdp_mmu.c

@@ -1516,6 +1516,8 @@ bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
 /*
  * Return the level of the lowest level SPTE added to sptes.
  * That SPTE may be non-present.
+ *
+ * Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}.
  */
 int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
 			 int *root_level)

@@ -1527,14 +1529,10 @@ int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
 	*root_level = vcpu->arch.mmu->shadow_root_level;
 
-	rcu_read_lock();
-
 	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
 		leaf = iter.level;
 		sptes[leaf] = iter.old_spte;
 	}
 
-	rcu_read_unlock();
-
 	return leaf;
 }
arch/x86/kvm/mmu/tdp_mmu.h

@@ -77,6 +77,16 @@ bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
 				   struct kvm_memory_slot *slot, gfn_t gfn,
 				   int min_level);
 
+static inline void kvm_tdp_mmu_walk_lockless_begin(void)
+{
+	rcu_read_lock();
+}
+
+static inline void kvm_tdp_mmu_walk_lockless_end(void)
+{
+	rcu_read_unlock();
+}
+
 int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
 			 int *root_level);