Commit a3f15bda authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86/mmu: Pass address space ID to TDP MMU root walkers

Move the address space ID check that is performed when iterating over
roots into the macro helpers to consolidate code.

No functional change intended.
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210326021957.1424875-7-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 2b9663d8
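
As background for the diff below, here is a minimal, self-contained sketch of the idiom the patch adopts. The toy types and the for_each_toy_root macro are invented for this illustration and are not KVM code: a root's address space ID is derived from the SMM bit of its role, and the iterator macro filters out roots from the other address space before the caller's loop body ever runs.

#include <stdio.h>

/* Toy stand-ins for KVM's structures; invented for this illustration. */
struct toy_role { int smm; };
struct toy_root { struct toy_role role; int id; };

/* Mirrors the new kvm_mmu_role_as_id()/kvm_mmu_page_as_id() split. */
static int toy_role_as_id(struct toy_role role)
{
	return role.smm ? 1 : 0;
}

static int toy_root_as_id(struct toy_root *root)
{
	return toy_role_as_id(root->role);
}

/*
 * Iterate over every root, but hand the caller's loop body only the
 * roots whose address space matches _as_id.  The caller's statement
 * becomes the "else" branch of the hidden if.
 */
#define for_each_toy_root(_roots, _n, _root, _as_id)			\
	for ((_root) = (_roots); (_root) < (_roots) + (_n); (_root)++)	\
		if (toy_root_as_id(_root) != (_as_id)) {		\
		} else

int main(void)
{
	struct toy_root roots[] = {
		{ .role = { .smm = 0 }, .id = 0 },
		{ .role = { .smm = 1 }, .id = 1 },
		{ .role = { .smm = 0 }, .id = 2 },
	};
	struct toy_root *root;

	/* Walk only the non-SMM address space: visits roots 0 and 2. */
	for_each_toy_root(roots, 3, root, 0)
		printf("visiting root %d\n", root->id);

	return 0;
}

With this shape, callers no longer need the open-coded "derive the root's as_id, compare, continue" sequence that the hunks below delete.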
@@ -88,9 +88,14 @@ static inline struct kvm_mmu_page *sptep_to_sp(u64 *sptep)
 	return to_shadow_page(__pa(sptep));
 }
 
+static inline int kvm_mmu_role_as_id(union kvm_mmu_page_role role)
+{
+	return role.smm ? 1 : 0;
+}
+
 static inline int kvm_mmu_page_as_id(struct kvm_mmu_page *sp)
 {
-	return sp->role.smm ? 1 : 0;
+	return kvm_mmu_role_as_id(sp->role);
 }
 
 static inline bool kvm_vcpu_ad_need_write_protect(struct kvm_vcpu *vcpu)
@@ -76,14 +76,18 @@ static inline struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
  * if exiting the loop early, the caller must drop the reference to the most
  * recent root. (Unless keeping a live reference is desirable.)
  */
-#define for_each_tdp_mmu_root_yield_safe(_kvm, _root)			\
+#define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id)		\
 	for (_root = list_first_entry(&_kvm->arch.tdp_mmu_roots,	\
 				      typeof(*_root), link);		\
 	     tdp_mmu_next_root_valid(_kvm, _root);			\
-	     _root = tdp_mmu_next_root(_kvm, _root))
+	     _root = tdp_mmu_next_root(_kvm, _root))			\
+		if (kvm_mmu_page_as_id(_root) != _as_id) {		\
+		} else
 
-#define for_each_tdp_mmu_root(_kvm, _root)				\
-	list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)
+#define for_each_tdp_mmu_root(_kvm, _root, _as_id)			\
+	list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)	\
+		if (kvm_mmu_page_as_id(_root) != _as_id) {		\
+		} else
 
 static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
			  gfn_t start, gfn_t end, bool can_yield, bool flush);
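
One detail of the macro shape above that the commit message does not spell out (this is my reading, not something the patch states): the filter is written as an if with an empty then-branch followed by else, rather than a plain if on the matching condition, so that the caller's statement becomes the else-branch and a surrounding if/else in the caller cannot mis-bind to the macro's hidden if. A standalone toy comparison, with macros and names invented for this sketch:

#include <stdio.h>

/* Naive filter: just append "if (condition)" to the loop. */
#define for_each_even_naive(_i, _n)			\
	for ((_i) = 0; (_i) < (_n); (_i)++)		\
		if ((_i) % 2 == 0)

/* Filter in the style of the patch: empty then-branch, body is the else. */
#define for_each_even_safe(_i, _n)			\
	for ((_i) = 0; (_i) < (_n); (_i)++)		\
		if ((_i) % 2 != 0) {			\
		} else

static void run(int use_loop)
{
	int i;

	/*
	 * The "else" below is meant to pair with "if (use_loop)".  With
	 * the naive macro it instead pairs with the macro's hidden "if",
	 * so "skipped" is printed once per odd i whenever the loop runs,
	 * and never when use_loop is false.
	 */
	if (use_loop)
		for_each_even_naive(i, 4)
			printf("naive: i=%d\n", i);
	else
		printf("naive: skipped\n");

	/* The empty-then/else form leaves the caller's else alone. */
	if (use_loop)
		for_each_even_safe(i, 4)
			printf("safe: i=%d\n", i);
	else
		printf("safe: skipped\n");
}

int main(void)
{
	run(1);
	run(0);
	return 0;
}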
@@ -148,7 +152,7 @@ hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
 	role = page_role_for_level(vcpu, vcpu->arch.mmu->shadow_root_level);
 
 	/* Check for an existing root before allocating a new one. */
-	for_each_tdp_mmu_root(kvm, root) {
+	for_each_tdp_mmu_root(kvm, root, kvm_mmu_role_as_id(role)) {
 		if (root->role.word == role.word) {
 			kvm_mmu_get_root(kvm, root);
 			goto out;
@@ -704,11 +708,8 @@ bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
 {
 	struct kvm_mmu_page *root;
 
-	for_each_tdp_mmu_root_yield_safe(kvm, root) {
-		if (kvm_mmu_page_as_id(root) != as_id)
-			continue;
+	for_each_tdp_mmu_root_yield_safe(kvm, root, as_id)
 		flush = zap_gfn_range(kvm, root, start, end, can_yield, flush);
-	}
 
 	return flush;
 }
@@ -888,8 +889,8 @@ static __always_inline int kvm_tdp_mmu_handle_hva_range(struct kvm *kvm,
 	int ret = 0;
 	int as_id;
 
-	for_each_tdp_mmu_root_yield_safe(kvm, root) {
-		as_id = kvm_mmu_page_as_id(root);
+	for (as_id = 0; as_id < KVM_ADDRESS_SPACE_NUM; as_id++) {
+		for_each_tdp_mmu_root_yield_safe(kvm, root, as_id) {
 		slots = __kvm_memslots(kvm, as_id);
 		kvm_for_each_memslot(memslot, slots) {
 			unsigned long hva_start, hva_end;
@@ -911,6 +912,7 @@ static __always_inline int kvm_tdp_mmu_handle_hva_range(struct kvm *kvm,
					     gfn_end, data);
 		}
 	}
+	}
 
 	return ret;
 }
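
The two hunks above change kvm_tdp_mmu_handle_hva_range() from deriving the address space ID per root to looping over the address spaces and walking only that space's roots; the body keeps its old indentation, which is why the second hunk adds only a closing brace. A toy sketch of the resulting loop shape, with invented stand-ins for the KVM types and for KVM_ADDRESS_SPACE_NUM:

#include <stdio.h>

#define TOY_ADDRESS_SPACE_NUM 2	/* invented stand-in for KVM_ADDRESS_SPACE_NUM */

struct toy_root { int as_id; int id; };

/* Same filtering iterator as in the earlier sketch, simplified. */
#define for_each_toy_root(_roots, _n, _root, _as_id)			\
	for ((_root) = (_roots); (_root) < (_roots) + (_n); (_root)++)	\
		if ((_root)->as_id != (_as_id)) {			\
		} else

int main(void)
{
	struct toy_root roots[] = {
		{ .as_id = 0, .id = 0 },
		{ .as_id = 1, .id = 1 },
		{ .as_id = 0, .id = 2 },
	};
	struct toy_root *root;
	int as_id;

	/*
	 * Shape of the reworked handler: the outer loop picks the
	 * address space (and with it the memslot set that would be
	 * consulted), the inner walk visits only that space's roots.
	 */
	for (as_id = 0; as_id < TOY_ADDRESS_SPACE_NUM; as_id++) {
		for_each_toy_root(roots, 3, root, as_id) {
			printf("as_id %d: root %d\n", as_id, root->id);
		}
	}

	return 0;
}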
@@ -1120,17 +1122,11 @@ bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, struct kvm_memory_slot *slot,
			     int min_level)
 {
 	struct kvm_mmu_page *root;
-	int root_as_id;
 	bool spte_set = false;
 
-	for_each_tdp_mmu_root_yield_safe(kvm, root) {
-		root_as_id = kvm_mmu_page_as_id(root);
-		if (root_as_id != slot->as_id)
-			continue;
-
+	for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
 		spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
			     slot->base_gfn + slot->npages, min_level);
-	}
 
 	return spte_set;
 }
@@ -1185,17 +1181,11 @@ static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm, struct kvm_memory_slot *slot)
 {
 	struct kvm_mmu_page *root;
-	int root_as_id;
 	bool spte_set = false;
 
-	for_each_tdp_mmu_root_yield_safe(kvm, root) {
-		root_as_id = kvm_mmu_page_as_id(root);
-		if (root_as_id != slot->as_id)
-			continue;
-
+	for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
 		spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
				slot->base_gfn + slot->npages);
-	}
 
 	return spte_set;
 }
@@ -1257,16 +1247,10 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
				       bool wrprot)
 {
 	struct kvm_mmu_page *root;
-	int root_as_id;
 
 	lockdep_assert_held_write(&kvm->mmu_lock);
-	for_each_tdp_mmu_root(kvm, root) {
-		root_as_id = kvm_mmu_page_as_id(root);
-		if (root_as_id != slot->as_id)
-			continue;
-
+	for_each_tdp_mmu_root(kvm, root, slot->as_id)
 		clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot);
-	}
 }
 
 /*
@@ -1319,15 +1303,9 @@ bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
				       struct kvm_memory_slot *slot, bool flush)
 {
 	struct kvm_mmu_page *root;
-	int root_as_id;
 
-	for_each_tdp_mmu_root_yield_safe(kvm, root) {
-		root_as_id = kvm_mmu_page_as_id(root);
-		if (root_as_id != slot->as_id)
-			continue;
-
+	for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
 		flush = zap_collapsible_spte_range(kvm, root, slot, flush);
-	}
 
 	return flush;
 }
@@ -1371,17 +1349,12 @@ bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
				   struct kvm_memory_slot *slot, gfn_t gfn)
 {
 	struct kvm_mmu_page *root;
-	int root_as_id;
 	bool spte_set = false;
 
 	lockdep_assert_held_write(&kvm->mmu_lock);
-	for_each_tdp_mmu_root(kvm, root) {
-		root_as_id = kvm_mmu_page_as_id(root);
-		if (root_as_id != slot->as_id)
-			continue;
-
+	for_each_tdp_mmu_root(kvm, root, slot->as_id)
 		spte_set |= write_protect_gfn(kvm, root, gfn);
-	}
 
 	return spte_set;
 }