Commit eb162c94 authored by Paolo Bonzini

Merge branch 'kvm-tdx-prep-1-truncated' into HEAD

A rename and refactoring extracted from the preparatory series for
Intel TDX support in KVM's MMU.
parents 27e6a24a c2f38f75
@@ -3448,7 +3448,7 @@ static int fast_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 		u64 new_spte;
 
 		if (tdp_mmu_enabled)
-			sptep = kvm_tdp_mmu_fast_pf_get_last_sptep(vcpu, fault->addr, &spte);
+			sptep = kvm_tdp_mmu_fast_pf_get_last_sptep(vcpu, fault->gfn, &spte);
 		else
 			sptep = fast_pf_get_last_sptep(vcpu, fault->addr, &spte);
@@ -3458,7 +3458,7 @@ static int fast_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 		 * available as the vCPU holds a reference to its root(s).
 		 */
 		if (WARN_ON_ONCE(!sptep))
-			spte = REMOVED_SPTE;
+			spte = FROZEN_SPTE;
 
 		if (!is_shadow_present_pte(spte))
			break;
...
@@ -383,7 +383,7 @@ void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask)
 	 * not set any RWX bits.
 	 */
 	if (WARN_ON((mmio_value & mmio_mask) != mmio_value) ||
-	    WARN_ON(mmio_value && (REMOVED_SPTE & mmio_mask) == mmio_value))
+	    WARN_ON(mmio_value && (FROZEN_SPTE & mmio_mask) == mmio_value))
 		mmio_value = 0;
 
 	if (!mmio_value)
...
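Aside: the kvm_mmu_set_mmio_spte_mask() hunk above matters because KVM recognizes an MMIO SPTE roughly by testing (spte & mmio_mask) == mmio_value, so an mmio_value that aliases the frozen value would make a frozen entry look like an MMIO SPTE. A minimal standalone sketch of that collision check, using placeholder constants rather than KVM's real masks:

/*
 * Standalone illustration (not kernel code) of the collision the WARN above
 * rejects.  SHADOW_NONPRESENT_VALUE and the mask/value pair are placeholders.
 */
#include <stdint.h>
#include <stdio.h>

#define SHADOW_NONPRESENT_VALUE 0ULL                    /* placeholder */
#define FROZEN_SPTE (SHADOW_NONPRESENT_VALUE | 0x5a0ULL)

/* An SPTE is treated as MMIO when its masked bits equal mmio_value. */
static int looks_like_mmio(uint64_t spte, uint64_t mmio_mask, uint64_t mmio_value)
{
        return mmio_value && (spte & mmio_mask) == mmio_value;
}

int main(void)
{
        uint64_t mmio_mask = 0x7e0ULL, mmio_value = 0x5a0ULL;   /* deliberately bad pair */

        /* This is exactly what the WARN guards against: a frozen SPTE would
         * be misread as an MMIO SPTE, so KVM falls back to mmio_value = 0. */
        if (looks_like_mmio(FROZEN_SPTE, mmio_mask, mmio_value))
                printf("collision: FROZEN_SPTE would be treated as MMIO\n");
        return 0;
}

When the collision is detected, the real function simply clears mmio_value, disabling MMIO caching rather than risking the ambiguity.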
@@ -202,7 +202,7 @@ extern u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
 /*
  * If a thread running without exclusive control of the MMU lock must perform a
- * multi-part operation on an SPTE, it can set the SPTE to REMOVED_SPTE as a
+ * multi-part operation on an SPTE, it can set the SPTE to FROZEN_SPTE as a
  * non-present intermediate value. Other threads which encounter this value
  * should not modify the SPTE.
  *
@@ -212,14 +212,14 @@ extern u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
  *
  * Only used by the TDP MMU.
  */
-#define REMOVED_SPTE	(SHADOW_NONPRESENT_VALUE | 0x5a0ULL)
+#define FROZEN_SPTE	(SHADOW_NONPRESENT_VALUE | 0x5a0ULL)
 
 /* Removed SPTEs must not be misconstrued as shadow present PTEs. */
-static_assert(!(REMOVED_SPTE & SPTE_MMU_PRESENT_MASK));
+static_assert(!(FROZEN_SPTE & SPTE_MMU_PRESENT_MASK));
 
-static inline bool is_removed_spte(u64 spte)
+static inline bool is_frozen_spte(u64 spte)
 {
-	return spte == REMOVED_SPTE;
+	return spte == FROZEN_SPTE;
 }
 
 /* Get an SPTE's index into its parent's page table (and the spt array). */
...
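The spte.h comment above describes the mechanism the rename is about: FROZEN_SPTE is a non-present placeholder that a lockless writer parks in an entry while performing a multi-part update, and that other threads must not modify. A minimal user-space sketch of that protocol, with C11 atomics standing in for KVM's SPTE write helpers (the function name and the SHADOW_NONPRESENT_VALUE placeholder are illustrative, not the kernel's):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define SHADOW_NONPRESENT_VALUE 0ULL                    /* placeholder */
#define FROZEN_SPTE (SHADOW_NONPRESENT_VALUE | 0x5a0ULL)

static inline bool is_frozen_spte(uint64_t spte)
{
        return spte == FROZEN_SPTE;
}

/*
 * A lockless writer freezes the entry, does its multi-part work, then
 * publishes the final value.  Anyone who races and observes FROZEN_SPTE must
 * leave the entry alone (here: fail so the caller can retry).
 */
bool try_update_spte(_Atomic uint64_t *sptep, uint64_t expected, uint64_t final_val)
{
        /* Never set an SPTE to or from the frozen value via this path. */
        if (is_frozen_spte(expected) || is_frozen_spte(final_val))
                return false;

        /* Step 1: freeze, but only if the entry still holds what we expect. */
        if (!atomic_compare_exchange_strong(sptep, &expected, FROZEN_SPTE))
                return false;           /* lost the race, or entry is frozen */

        /* Step 2: the multi-part work (TLB flush, child teardown, ...) would
         * happen here while every other thread keeps its hands off the entry. */

        /* Step 3: publish the final value, un-freezing the entry. */
        atomic_store(sptep, final_val);
        return true;
}

Step 2 is where the real code does its work while the entry is frozen; the tdp_mmu.c hunks below show the two concrete users, page-table teardown and atomic zapping.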
@@ -365,8 +365,8 @@ static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared)
 			 * value to the removed SPTE value.
 			 */
 			for (;;) {
-				old_spte = kvm_tdp_mmu_write_spte_atomic(sptep, REMOVED_SPTE);
-				if (!is_removed_spte(old_spte))
+				old_spte = kvm_tdp_mmu_write_spte_atomic(sptep, FROZEN_SPTE);
+				if (!is_frozen_spte(old_spte))
 					break;
 				cpu_relax();
 			}
@@ -397,11 +397,11 @@ static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared)
 			 * No retry is needed in the atomic update path as the
 			 * sole concern is dropping a Dirty bit, i.e. no other
 			 * task can zap/remove the SPTE as mmu_lock is held for
-			 * write. Marking the SPTE as a removed SPTE is not
+			 * write. Marking the SPTE as a frozen SPTE is not
 			 * strictly necessary for the same reason, but using
-			 * the remove SPTE value keeps the shared/exclusive
+			 * the frozen SPTE value keeps the shared/exclusive
 			 * paths consistent and allows the handle_changed_spte()
-			 * call below to hardcode the new value to REMOVED_SPTE.
+			 * call below to hardcode the new value to FROZEN_SPTE.
 			 *
 			 * Note, even though dropping a Dirty bit is the only
 			 * scenario where a non-atomic update could result in a
@@ -413,10 +413,10 @@ static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared)
 			 * it here.
 			 */
 			old_spte = kvm_tdp_mmu_write_spte(sptep, old_spte,
-							  REMOVED_SPTE, level);
+							  FROZEN_SPTE, level);
 		}
 		handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn,
-				    old_spte, REMOVED_SPTE, level, shared);
+				    old_spte, FROZEN_SPTE, level, shared);
 	}
 
 	call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
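The for (;;) loop in the first handle_removed_pt() hunk is the shared-path freeze: atomically install FROZEN_SPTE and, if the previous value was already frozen, another thread is mid-update on this entry, so relax and try again until a real old value is captured. A portable sketch of that loop, with cpu_relax() and the SPTE helpers modeled rather than taken from the kernel:

#include <stdatomic.h>
#include <stdint.h>

#define FROZEN_SPTE 0x5a0ULL                            /* placeholder value */

static inline int is_frozen_spte(uint64_t spte) { return spte == FROZEN_SPTE; }
static inline void cpu_relax_hint(void) { /* e.g. a pause/yield hint */ }

uint64_t freeze_and_capture(_Atomic uint64_t *sptep)
{
        uint64_t old_spte;

        for (;;) {
                old_spte = atomic_exchange(sptep, FROZEN_SPTE);
                if (!is_frozen_spte(old_spte))
                        break;          /* this thread now owns the teardown */
                cpu_relax_hint();       /* someone else froze it first; wait */
        }

        return old_spte;                /* the real old value, for later processing */
}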
@@ -490,19 +490,19 @@ static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
 	 */
 	if (!was_present && !is_present) {
 		/*
-		 * If this change does not involve a MMIO SPTE or removed SPTE,
+		 * If this change does not involve a MMIO SPTE or frozen SPTE,
 		 * it is unexpected. Log the change, though it should not
 		 * impact the guest since both the former and current SPTEs
 		 * are nonpresent.
 		 */
 		if (WARN_ON_ONCE(!is_mmio_spte(kvm, old_spte) &&
 				 !is_mmio_spte(kvm, new_spte) &&
-				 !is_removed_spte(new_spte)))
+				 !is_frozen_spte(new_spte)))
 			pr_err("Unexpected SPTE change! Nonpresent SPTEs\n"
 			       "should not be replaced with another,\n"
 			       "different nonpresent SPTE, unless one or both\n"
 			       "are MMIO SPTEs, or the new SPTE is\n"
-			       "a temporary removed SPTE.\n"
+			       "a temporary frozen SPTE.\n"
 			       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
 			       as_id, gfn, old_spte, new_spte, level);
 		return;
@@ -541,7 +541,7 @@ static inline int __must_check __tdp_mmu_set_spte_atomic(struct tdp_iter *iter,
 	 * and pre-checking before inserting a new SPTE is advantageous as it
 	 * avoids unnecessary work.
 	 */
-	WARN_ON_ONCE(iter->yielded || is_removed_spte(iter->old_spte));
+	WARN_ON_ONCE(iter->yielded || is_frozen_spte(iter->old_spte));
 
 	/*
 	 * Note, fast_pf_fix_direct_spte() can also modify TDP MMU SPTEs and
@@ -604,26 +604,26 @@ static inline int __must_check tdp_mmu_zap_spte_atomic(struct kvm *kvm,
 	 * in its place before the TLBs are flushed.
 	 *
 	 * Delay processing of the zapped SPTE until after TLBs are flushed and
-	 * the REMOVED_SPTE is replaced (see below).
+	 * the FROZEN_SPTE is replaced (see below).
 	 */
-	ret = __tdp_mmu_set_spte_atomic(iter, REMOVED_SPTE);
+	ret = __tdp_mmu_set_spte_atomic(iter, FROZEN_SPTE);
 	if (ret)
 		return ret;
 
 	kvm_flush_remote_tlbs_gfn(kvm, iter->gfn, iter->level);
 
 	/*
-	 * No other thread can overwrite the removed SPTE as they must either
+	 * No other thread can overwrite the frozen SPTE as they must either
 	 * wait on the MMU lock or use tdp_mmu_set_spte_atomic() which will not
-	 * overwrite the special removed SPTE value. Use the raw write helper to
+	 * overwrite the special frozen SPTE value. Use the raw write helper to
 	 * avoid an unnecessary check on volatile bits.
 	 */
 	__kvm_tdp_mmu_write_spte(iter->sptep, SHADOW_NONPRESENT_VALUE);
 
 	/*
 	 * Process the zapped SPTE after flushing TLBs, and after replacing
-	 * REMOVED_SPTE with 0. This minimizes the amount of time vCPUs are
-	 * blocked by the REMOVED_SPTE and reduces contention on the child
+	 * FROZEN_SPTE with 0. This minimizes the amount of time vCPUs are
+	 * blocked by the FROZEN_SPTE and reduces contention on the child
 	 * SPTEs.
 	 */
 	handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
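tdp_mmu_zap_spte_atomic() is the clearest user of the frozen value: freeze the entry, flush TLBs while it is frozen so no vCPU can install anything in its place, replace the frozen value with the plain non-present value as early as possible, and only then do the heavier handle_changed_spte() processing. A user-space model of that ordering (flush_tlbs() and process_zapped() are stand-ins, not KVM APIs):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define SHADOW_NONPRESENT_VALUE 0ULL                    /* placeholder */
#define FROZEN_SPTE (SHADOW_NONPRESENT_VALUE | 0x5a0ULL)

static void flush_tlbs(void) { /* stand-in for kvm_flush_remote_tlbs_gfn() */ }
static void process_zapped(uint64_t old_spte) { (void)old_spte; /* stand-in */ }

bool zap_spte_atomic(_Atomic uint64_t *sptep, uint64_t old_spte)
{
        /* Freeze; fails if another thread changed the entry under us. */
        if (!atomic_compare_exchange_strong(sptep, &old_spte, FROZEN_SPTE))
                return false;                   /* caller retries */

        /* TLBs are flushed while the entry is frozen, so no vCPU can install
         * a new translation in its place before the stale one is gone. */
        flush_tlbs();

        /* Un-freeze as early as possible to minimize how long others block. */
        atomic_store(sptep, SHADOW_NONPRESENT_VALUE);

        /* Deferred processing runs only after the frozen value is gone. */
        process_zapped(old_spte);
        return true;
}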
@@ -653,12 +653,12 @@ static u64 tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep,
 
 	/*
 	 * No thread should be using this function to set SPTEs to or from the
-	 * temporary removed SPTE value.
+	 * temporary frozen SPTE value.
 	 * If operating under the MMU lock in read mode, tdp_mmu_set_spte_atomic
 	 * should be used. If operating under the MMU lock in write mode, the
-	 * use of the removed SPTE should not be necessary.
+	 * use of the frozen SPTE should not be necessary.
 	 */
-	WARN_ON_ONCE(is_removed_spte(old_spte) || is_removed_spte(new_spte));
+	WARN_ON_ONCE(is_frozen_spte(old_spte) || is_frozen_spte(new_spte));
 
 	old_spte = kvm_tdp_mmu_write_spte(sptep, old_spte, new_spte, level);
@@ -1127,7 +1127,7 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 		 * If SPTE has been frozen by another thread, just give up and
 		 * retry, avoiding unnecessary page table allocation and free.
 		 */
-		if (is_removed_spte(iter.old_spte))
+		if (is_frozen_spte(iter.old_spte))
 			goto retry;
 
 		if (iter.level == fault->goal_level)
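In kvm_tdp_mmu_map() the frozen value is purely a back-off signal: seeing it means another thread owns the entry for the moment, so the mapper restarts instead of allocating a child page table it would only have to free again. A tiny sketch of that check; the iterator type and return convention below are invented for illustration:

#include <stdatomic.h>
#include <stdint.h>

#define FROZEN_SPTE 0x5a0ULL                            /* placeholder value */

static inline int is_frozen_spte(uint64_t spte) { return spte == FROZEN_SPTE; }

/* Hypothetical, simplified stand-in for the TDP MMU iterator state. */
struct toy_iter {
        _Atomic uint64_t *sptep;
        uint64_t old_spte;
};

/* Returns 0 on success, 1 if the caller should restart the walk ("retry"). */
int toy_map_step(struct toy_iter *iter)
{
        iter->old_spte = atomic_load(iter->sptep);

        /* Another thread froze this entry and is mid-update: bail out before
         * allocating a child page table that would immediately be freed. */
        if (is_frozen_spte(iter->old_spte))
                return 1;

        /* ... allocate/link a child table or install the final leaf SPTE ... */
        return 0;
}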
@@ -1802,12 +1802,11 @@ int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
  *
  * WARNING: This function is only intended to be called during fast_page_fault.
  */
-u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr,
+u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, gfn_t gfn,
 					u64 *spte)
 {
 	struct tdp_iter iter;
 	struct kvm_mmu *mmu = vcpu->arch.mmu;
-	gfn_t gfn = addr >> PAGE_SHIFT;
 	tdp_ptep_t sptep = NULL;
 
 	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
...
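The second change in the merge is the small API cleanup visible here: kvm_tdp_mmu_fast_pf_get_last_sptep() now takes a gfn_t directly and drops its own addr >> PAGE_SHIFT, since fast_page_fault() already has fault->gfn in hand. For reference, the conversion the helper no longer performs is just a page shift, roughly:

#include <stdint.h>

typedef uint64_t gpa_t;
typedef uint64_t gfn_t;

#define PAGE_SHIFT 12                   /* 4 KiB pages on x86 */

static inline gfn_t gpa_to_gfn(gpa_t gpa)
{
        return gpa >> PAGE_SHIFT;       /* e.g. gpa 0x123456 -> gfn 0x123 */
}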
@@ -64,7 +64,7 @@ static inline void kvm_tdp_mmu_walk_lockless_end(void)
 
 int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
 			 int *root_level);
-u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr,
+u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, gfn_t gfn,
 					u64 *spte);
 
 #ifdef CONFIG_X86_64
...