Commit 1b6d9d9e authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86/mmu: Split out max mapping level calculation to helper

Factor out the logic for determining the maximum mapping level given a
memslot and a gpa.  The helper will be used when zapping collapsible
SPTEs when disabling dirty logging, e.g. to avoid zapping SPTEs that
can't possibly be rebuilt as hugepages.

No functional change intended.
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210213005015.1651772-4-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent c060c72f
...@@ -2756,8 +2756,8 @@ static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep) ...@@ -2756,8 +2756,8 @@ static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
__direct_pte_prefetch(vcpu, sp, sptep); __direct_pte_prefetch(vcpu, sp, sptep);
} }
static int host_pfn_mapping_level(struct kvm_vcpu *vcpu, gfn_t gfn, static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
kvm_pfn_t pfn, struct kvm_memory_slot *slot) struct kvm_memory_slot *slot)
{ {
unsigned long hva; unsigned long hva;
pte_t *pte; pte_t *pte;
...@@ -2776,19 +2776,36 @@ static int host_pfn_mapping_level(struct kvm_vcpu *vcpu, gfn_t gfn, ...@@ -2776,19 +2776,36 @@ static int host_pfn_mapping_level(struct kvm_vcpu *vcpu, gfn_t gfn,
*/ */
hva = __gfn_to_hva_memslot(slot, gfn); hva = __gfn_to_hva_memslot(slot, gfn);
pte = lookup_address_in_mm(vcpu->kvm->mm, hva, &level); pte = lookup_address_in_mm(kvm->mm, hva, &level);
if (unlikely(!pte)) if (unlikely(!pte))
return PG_LEVEL_4K; return PG_LEVEL_4K;
return level; return level;
} }
/*
 * Return the highest page-table level at which @gfn in @slot may be mapped,
 * capped at @max_level (and at the global max_huge_page_level).  Levels at
 * which the memslot's lpage_info disallows a huge page are walked down past;
 * if a larger-than-4K mapping is still possible, the final level is taken
 * from the host mapping of @pfn via host_pfn_mapping_level().
 */
int kvm_mmu_max_mapping_level(struct kvm *kvm, struct kvm_memory_slot *slot,
			      gfn_t gfn, kvm_pfn_t pfn, int max_level)
{
	int level = min(max_level, max_huge_page_level);

	/* Walk down past levels where the memslot forbids a huge page. */
	while (level > PG_LEVEL_4K &&
	       lpage_info_slot(gfn, slot, level)->disallow_lpage)
		level--;

	if (level == PG_LEVEL_4K)
		return PG_LEVEL_4K;

	/* Memslot allows a huge page; the host mapping has the final say. */
	return host_pfn_mapping_level(kvm, gfn, pfn, slot);
}
int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn, int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
int max_level, kvm_pfn_t *pfnp, int max_level, kvm_pfn_t *pfnp,
bool huge_page_disallowed, int *req_level) bool huge_page_disallowed, int *req_level)
{ {
struct kvm_memory_slot *slot; struct kvm_memory_slot *slot;
struct kvm_lpage_info *linfo;
kvm_pfn_t pfn = *pfnp; kvm_pfn_t pfn = *pfnp;
kvm_pfn_t mask; kvm_pfn_t mask;
int level; int level;
...@@ -2805,17 +2822,7 @@ int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn, ...@@ -2805,17 +2822,7 @@ int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
if (!slot) if (!slot)
return PG_LEVEL_4K; return PG_LEVEL_4K;
max_level = min(max_level, max_huge_page_level); level = kvm_mmu_max_mapping_level(vcpu->kvm, slot, gfn, pfn, max_level);
for ( ; max_level > PG_LEVEL_4K; max_level--) {
linfo = lpage_info_slot(gfn, slot, max_level);
if (!linfo->disallow_lpage)
break;
}
if (max_level == PG_LEVEL_4K)
return PG_LEVEL_4K;
level = host_pfn_mapping_level(vcpu, gfn, pfn, slot);
if (level == PG_LEVEL_4K) if (level == PG_LEVEL_4K)
return level; return level;
......
...@@ -138,6 +138,8 @@ enum { ...@@ -138,6 +138,8 @@ enum {
#define SET_SPTE_NEED_REMOTE_TLB_FLUSH BIT(1) #define SET_SPTE_NEED_REMOTE_TLB_FLUSH BIT(1)
#define SET_SPTE_SPURIOUS BIT(2) #define SET_SPTE_SPURIOUS BIT(2)
int kvm_mmu_max_mapping_level(struct kvm *kvm, struct kvm_memory_slot *slot,
gfn_t gfn, kvm_pfn_t pfn, int max_level);
int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn, int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
int max_level, kvm_pfn_t *pfnp, int max_level, kvm_pfn_t *pfnp,
bool huge_page_disallowed, int *req_level); bool huge_page_disallowed, int *req_level);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment