Commit 657f1d86 authored by Paolo Bonzini

Merge branch 'kvm-tdp-fix-rcu' into HEAD

parents 57e45ea4 08889894
arch/x86/kvm/mmu/mmu_internal.h
@@ -88,6 +88,11 @@ static inline struct kvm_mmu_page *sptep_to_sp(u64 *sptep)
 	return to_shadow_page(__pa(sptep));
 }
 
+static inline int kvm_mmu_page_as_id(struct kvm_mmu_page *sp)
+{
+	return sp->role.smm ? 1 : 0;
+}
+
 static inline bool kvm_vcpu_ad_need_write_protect(struct kvm_vcpu *vcpu)
 {
 	/*
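For context: x86 KVM maintains two memslot address spaces, regular (0) and SMM (1), and role.smm records which one a shadow page belongs to. Moving kvm_mmu_page_as_id() into this shared header lets the TDP iterator code use it as well. A minimal sketch of how such an ID is typically consumed, using the existing __kvm_memslots() accessor (the wrapper function itself is hypothetical):

/* Sketch only: the helper below is hypothetical, but __kvm_memslots()
 * is the real accessor that selects a memslot set by address space ID. */
static struct kvm_memslots *memslots_for_sp(struct kvm *kvm,
					    struct kvm_mmu_page *sp)
{
	int as_id = kvm_mmu_page_as_id(sp);	/* 0 = regular, 1 = SMM */

	return __kvm_memslots(kvm, as_id);
}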
arch/x86/kvm/mmu/tdp_iter.c
@@ -20,6 +20,21 @@ static gfn_t round_gfn_for_level(gfn_t gfn, int level)
 	return gfn & -KVM_PAGES_PER_HPAGE(level);
 }
 
+/*
+ * Return the TDP iterator to the root PT and allow it to continue its
+ * traversal over the paging structure from there.
+ */
+void tdp_iter_restart(struct tdp_iter *iter)
+{
+	iter->yielded_gfn = iter->next_last_level_gfn;
+	iter->level = iter->root_level;
+
+	iter->gfn = round_gfn_for_level(iter->next_last_level_gfn, iter->level);
+	tdp_iter_refresh_sptep(iter);
+
+	iter->valid = true;
+}
+
 /*
  * Sets a TDP iterator to walk a pre-order traversal of the paging structure
  * rooted at root_pt, starting with the walk to translate next_last_level_gfn.
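tdp_iter_restart() exists so a walk can resume from the root mid-loop, e.g. after the MMU lock has been dropped, without redoing the one-time setup in tdp_iter_start(). For context, walks are driven by an iteration macro in tdp_iter.h (not changed by this merge), roughly:

/* Context from tdp_iter.h, shown as it reads around this merge: start a
 * pre-order walk at 'start' and advance until 'end' is reached. */
#define for_each_tdp_pte_min_level(iter, root, root_level, min_level, start, end) \
	for (tdp_iter_start(&iter, root, root_level, min_level, start);	\
	     iter.valid && iter.gfn < end;					\
	     tdp_iter_next(&iter))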
@@ -31,16 +46,12 @@ void tdp_iter_start(struct tdp_iter *iter, u64 *root_pt, int root_level,
 	WARN_ON(root_level > PT64_ROOT_MAX_LEVEL);
 
 	iter->next_last_level_gfn = next_last_level_gfn;
-	iter->yielded_gfn = iter->next_last_level_gfn;
 	iter->root_level = root_level;
 	iter->min_level = min_level;
-	iter->level = root_level;
-	iter->pt_path[iter->level - 1] = (tdp_ptep_t)root_pt;
+	iter->pt_path[iter->root_level - 1] = (tdp_ptep_t)root_pt;
+	iter->as_id = kvm_mmu_page_as_id(sptep_to_sp(root_pt));
 
-	iter->gfn = round_gfn_for_level(iter->next_last_level_gfn, iter->level);
-	tdp_iter_refresh_sptep(iter);
-
-	iter->valid = true;
+	tdp_iter_restart(iter);
 }
 
 /*
@@ -159,8 +170,3 @@ void tdp_iter_next(struct tdp_iter *iter)
 		iter->valid = false;
 }
-
-tdp_ptep_t tdp_iter_root_pt(struct tdp_iter *iter)
-{
-	return iter->pt_path[iter->root_level - 1];
-}
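tdp_iter_root_pt() existed only so that SPTE writers in tdp_mmu.c could re-derive the address space ID from the root page on every update, a pattern the tdp_mmu.c hunks below delete:

/* The pattern removed below: re-deriving as_id from the root on every
 * SPTE write. tdp_iter_root_pt() returned an __rcu-annotated pointer,
 * so passing it to sptep_to_sp() without rcu_dereference() also violated
 * the RCU annotation this branch is fixing. */
u64 *root_pt = tdp_iter_root_pt(iter);
struct kvm_mmu_page *root = sptep_to_sp(root_pt);
int as_id = kvm_mmu_page_as_id(root);

With the ID cached by tdp_iter_start(), callers now read iter->as_id directly.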
arch/x86/kvm/mmu/tdp_iter.h
@@ -36,6 +36,8 @@ struct tdp_iter {
 	int min_level;
 	/* The iterator's current level within the paging structure */
 	int level;
+	/* The address space ID, i.e. SMM vs. regular. */
+	int as_id;
 	/* A snapshot of the value at sptep */
 	u64 old_spte;
 	/*
@@ -62,6 +64,6 @@ tdp_ptep_t spte_to_child_pt(u64 pte, int level);
 void tdp_iter_start(struct tdp_iter *iter, u64 *root_pt, int root_level,
 		    int min_level, gfn_t next_last_level_gfn);
 void tdp_iter_next(struct tdp_iter *iter);
-tdp_ptep_t tdp_iter_root_pt(struct tdp_iter *iter);
+void tdp_iter_restart(struct tdp_iter *iter);
 
 #endif /* __KVM_X86_MMU_TDP_ITER_H */
arch/x86/kvm/mmu/tdp_mmu.c
@@ -190,11 +190,6 @@ static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
 				u64 old_spte, u64 new_spte, int level,
 				bool shared);
 
-static int kvm_mmu_page_as_id(struct kvm_mmu_page *sp)
-{
-	return sp->role.smm ? 1 : 0;
-}
-
 static void handle_changed_spte_acc_track(u64 old_spte, u64 new_spte, int level)
 {
 	if (!is_shadow_present_pte(old_spte) || !is_last_spte(old_spte, level))
@@ -287,11 +282,16 @@ static void tdp_mmu_unlink_page(struct kvm *kvm, struct kvm_mmu_page *sp,
  *
  * Given a page table that has been removed from the TDP paging structure,
  * iterates through the page table to clear SPTEs and free child page tables.
+ *
+ * Note that pt is passed in as a tdp_ptep_t, but it does not need RCU
+ * protection. Since this thread removed it from the paging structure,
+ * this thread will be responsible for ensuring the page is freed. Hence the
+ * early rcu_dereferences in the function.
  */
-static void handle_removed_tdp_mmu_page(struct kvm *kvm, u64 *pt,
+static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt,
 					bool shared)
 {
-	struct kvm_mmu_page *sp = sptep_to_sp(pt);
+	struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(pt));
 	int level = sp->role.level;
 	gfn_t base_gfn = sp->gfn;
 	u64 old_child_spte;
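The tdp_ptep_t type referenced in the new comment carries an __rcu annotation (defined in tdp_iter.h, unchanged by this merge), which is why the function must strip the annotation explicitly even though no RCU protection is needed here:

/* Context from tdp_iter.h: SPTE pointers in the TDP MMU are RCU-annotated,
 * so sparse flags any dereference that bypasses rcu_dereference(). */
typedef u64 __rcu *tdp_ptep_t;

/* Hence the pattern in this function: strip the annotation before pointer
 * arithmetic or a read. This is safe without an RCU read-side critical
 * section because this thread unlinked the page and owns freeing it. */
sptep = rcu_dereference(pt) + i;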
@@ -304,7 +304,7 @@ static void handle_removed_tdp_mmu_page(struct kvm *kvm, u64 *pt,
 	tdp_mmu_unlink_page(kvm, sp, shared);
 
 	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
-		sptep = pt + i;
+		sptep = rcu_dereference(pt) + i;
 		gfn = base_gfn + (i * KVM_PAGES_PER_HPAGE(level - 1));
 
 		if (shared) {
@@ -478,10 +478,6 @@ static inline bool tdp_mmu_set_spte_atomic(struct kvm *kvm,
 					   struct tdp_iter *iter,
 					   u64 new_spte)
 {
-	u64 *root_pt = tdp_iter_root_pt(iter);
-	struct kvm_mmu_page *root = sptep_to_sp(root_pt);
-	int as_id = kvm_mmu_page_as_id(root);
-
 	lockdep_assert_held_read(&kvm->mmu_lock);
 
 	/*
@@ -495,8 +491,8 @@ static inline bool tdp_mmu_set_spte_atomic(struct kvm *kvm,
 		      new_spte) != iter->old_spte)
 		return false;
 
-	handle_changed_spte(kvm, as_id, iter->gfn, iter->old_spte, new_spte,
-			    iter->level, true);
+	handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
+			    new_spte, iter->level, true);
 
 	return true;
 }
@@ -524,7 +520,7 @@ static inline bool tdp_mmu_zap_spte_atomic(struct kvm *kvm,
 	 * here since the SPTE is going from non-present
 	 * to non-present.
 	 */
-	WRITE_ONCE(*iter->sptep, 0);
+	WRITE_ONCE(*rcu_dereference(iter->sptep), 0);
 
 	return true;
 }
@@ -550,10 +546,6 @@ static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
 				      u64 new_spte, bool record_acc_track,
 				      bool record_dirty_log)
 {
-	tdp_ptep_t root_pt = tdp_iter_root_pt(iter);
-	struct kvm_mmu_page *root = sptep_to_sp(root_pt);
-	int as_id = kvm_mmu_page_as_id(root);
-
 	lockdep_assert_held_write(&kvm->mmu_lock);
 
 	/*
@@ -567,13 +559,13 @@ static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
 	WRITE_ONCE(*rcu_dereference(iter->sptep), new_spte);
 
-	__handle_changed_spte(kvm, as_id, iter->gfn, iter->old_spte, new_spte,
-			      iter->level, false);
+	__handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
+			      new_spte, iter->level, false);
 	if (record_acc_track)
 		handle_changed_spte_acc_track(iter->old_spte, new_spte,
 					      iter->level);
 	if (record_dirty_log)
-		handle_changed_spte_dirty_log(kvm, as_id, iter->gfn,
+		handle_changed_spte_dirty_log(kvm, iter->as_id, iter->gfn,
 					      iter->old_spte, new_spte,
 					      iter->level);
 }
@@ -645,9 +637,7 @@ static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm,
 		WARN_ON(iter->gfn > iter->next_last_level_gfn);
 
-		tdp_iter_start(iter, iter->pt_path[iter->root_level - 1],
-			       iter->root_level, iter->min_level,
-			       iter->next_last_level_gfn);
+		tdp_iter_restart(iter);
 
 		return true;
 	}
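For context, the hunk above is the tail of the TDP MMU's yield path. Reconstructed from the surrounding code (a sketch, not part of this diff, and the exact signature may differ at this commit), tdp_mmu_iter_cond_resched() reads approximately as follows after the merge:

/* Approximate reconstruction: yield the MMU lock and the RCU read lock
 * if needed, then restart the walk from the root, since the paging
 * structure may have changed while the locks were dropped. */
static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm,
					     struct tdp_iter *iter)
{
	/* Ensure forward progress has been made before yielding. */
	if (iter->next_last_level_gfn == iter->yielded_gfn)
		return false;

	if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
		rcu_read_unlock();
		cond_resched_rwlock_write(&kvm->mmu_lock);
		rcu_read_lock();

		WARN_ON(iter->gfn > iter->next_last_level_gfn);

		tdp_iter_restart(iter);

		return true;
	}

	return false;
}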