Commit e662ec3e authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86/mmu: Move max hugepage level to a separate #define

Rename PT_MAX_HUGEPAGE_LEVEL to KVM_MAX_HUGEPAGE_LEVEL and make it a
separate define in anticipation of dropping KVM's PT_*_LEVEL enums in
favor of the kernel's PG_LEVEL_* enums.

No functional change intended.
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200428005422.4235-3-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent b2f432f8
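
The conversion the changelog anticipates is viable because x86's generic
page-table level enum already uses the same numeric values as KVM's private
PT_*_LEVEL enum. A minimal standalone sketch of that correspondence
(illustrative only, not part of this commit; the PG_LEVEL_* values mirror
x86's enum pg_level):

/*
 * Illustrative sketch, not kernel code: the planned PT_*_LEVEL ->
 * PG_LEVEL_* conversion can be a mechanical rename because the
 * enumerators already share values.
 */
enum { PT_PAGE_TABLE_LEVEL = 1, PT_DIRECTORY_LEVEL = 2, PT_PDPE_LEVEL = 3 };
enum pg_level { PG_LEVEL_NONE, PG_LEVEL_4K, PG_LEVEL_2M, PG_LEVEL_1G };

_Static_assert(PG_LEVEL_4K == PT_PAGE_TABLE_LEVEL, "4K level must match");
_Static_assert(PG_LEVEL_2M == PT_DIRECTORY_LEVEL, "2M level must match");
_Static_assert(PG_LEVEL_1G == PT_PDPE_LEVEL, "1G level must match");
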
@@ -114,10 +114,9 @@ enum {
 	PT_PAGE_TABLE_LEVEL	= 1,
 	PT_DIRECTORY_LEVEL	= 2,
 	PT_PDPE_LEVEL		= 3,
-	/* set max level to the biggest one */
-	PT_MAX_HUGEPAGE_LEVEL	= PT_PDPE_LEVEL,
 };
-#define KVM_NR_PAGE_SIZES	(PT_MAX_HUGEPAGE_LEVEL - \
+#define KVM_MAX_HUGEPAGE_LEVEL	PT_PDPE_LEVEL
+#define KVM_NR_PAGE_SIZES	(KVM_MAX_HUGEPAGE_LEVEL - \
 				 PT_PAGE_TABLE_LEVEL + 1)
 #define KVM_HPAGE_GFN_SHIFT(x)	(((x) - 1) * 9)
 #define KVM_HPAGE_SHIFT(x)	(PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
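The arithmetic behind the macros above is easiest to check with concrete
numbers: each page-table level resolves 9 more GFN bits, so with a
PAGE_SHIFT of 12 the three levels map to 4 KiB, 2 MiB and 1 GiB pages. A
standalone sketch (illustrative only, redefining the macros outside the
kernel):

#include <assert.h>

#define PAGE_SHIFT		12
#define PT_PAGE_TABLE_LEVEL	1
#define PT_PDPE_LEVEL		3
#define KVM_MAX_HUGEPAGE_LEVEL	PT_PDPE_LEVEL
#define KVM_NR_PAGE_SIZES	(KVM_MAX_HUGEPAGE_LEVEL - PT_PAGE_TABLE_LEVEL + 1)
#define KVM_HPAGE_GFN_SHIFT(x)	(((x) - 1) * 9)
#define KVM_HPAGE_SHIFT(x)	(PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))

int main(void)
{
	assert(KVM_NR_PAGE_SIZES == 3);	  /* 4K, 2M and 1G mappings */
	assert(KVM_HPAGE_SHIFT(1) == 12); /* 1UL << 12 = 4 KiB */
	assert(KVM_HPAGE_SHIFT(2) == 21); /* 1UL << 21 = 2 MiB */
	assert(KVM_HPAGE_SHIFT(3) == 30); /* 1UL << 30 = 1 GiB */
	return 0;
}
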
@@ -1199,7 +1199,7 @@ static void update_gfn_disallow_lpage_count(struct kvm_memory_slot *slot,
 	struct kvm_lpage_info *linfo;
 	int i;
 
-	for (i = PT_DIRECTORY_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
+	for (i = PT_DIRECTORY_LEVEL; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
 		linfo = lpage_info_slot(gfn, slot, i);
 		linfo->disallow_lpage += count;
 		WARN_ON(linfo->disallow_lpage < 0);
@@ -1763,7 +1763,7 @@ bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
 	int i;
 	bool write_protected = false;
 
-	for (i = PT_PAGE_TABLE_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
+	for (i = PT_PAGE_TABLE_LEVEL; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
 		rmap_head = __gfn_to_rmap(gfn, i, slot);
 		write_protected |= __rmap_write_protect(kvm, rmap_head, true);
 	}
@@ -1952,7 +1952,7 @@ static int kvm_handle_hva_range(struct kvm *kvm,
 		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
 
 		for_each_slot_rmap_range(memslot, PT_PAGE_TABLE_LEVEL,
-					 PT_MAX_HUGEPAGE_LEVEL,
+					 KVM_MAX_HUGEPAGE_LEVEL,
 					 gfn_start, gfn_end - 1,
 					 &iterator)
 			ret |= handler(kvm, iterator.rmap, memslot,
@@ -4214,7 +4214,7 @@ int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
 {
 	int max_level;
 
-	for (max_level = PT_MAX_HUGEPAGE_LEVEL;
+	for (max_level = KVM_MAX_HUGEPAGE_LEVEL;
 	     max_level > PT_PAGE_TABLE_LEVEL;
 	     max_level--) {
 		int page_num = KVM_PAGES_PER_HPAGE(max_level);
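
The kvm_tdp_page_fault() hunk is the usual "start big, walk down" pattern:
begin at KVM_MAX_HUGEPAGE_LEVEL and drop one level at a time until a
per-level check passes, falling back to 4K pages if none does. A generic
sketch of that shape (can_map_at() is a hypothetical stand-in for the loop
body elided in the diff, which in this function checks MTRR type
consistency across the candidate range):

/* Hypothetical helper standing in for the per-level check elided above. */
static bool can_map_at(int level);

static int pick_max_level(void)
{
	int max_level;

	/* Prefer the largest page size; stop at the first usable level. */
	for (max_level = KVM_MAX_HUGEPAGE_LEVEL;
	     max_level > PT_PAGE_TABLE_LEVEL;
	     max_level--) {
		if (can_map_at(max_level))
			break;
	}
	return max_level;	/* PT_PAGE_TABLE_LEVEL (4K) if nothing larger fits */
}
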
@@ -5641,7 +5641,7 @@ slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
 		      slot_level_handler fn, bool lock_flush_tlb)
 {
 	return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL,
-				 PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
+				 KVM_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
 }
 
 static __always_inline bool
@@ -5649,7 +5649,7 @@ slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
 			slot_level_handler fn, bool lock_flush_tlb)
 {
 	return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL + 1,
-				 PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
+				 KVM_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
 }
 
 static __always_inline bool
@@ -5867,7 +5867,8 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
 				continue;
 
 			slot_handle_level_range(kvm, memslot, kvm_zap_rmapp,
-						PT_PAGE_TABLE_LEVEL, PT_MAX_HUGEPAGE_LEVEL,
+						PT_PAGE_TABLE_LEVEL,
+						KVM_MAX_HUGEPAGE_LEVEL,
 						start, end - 1, true);
 		}
 	}
@@ -5889,7 +5890,7 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
 
 	spin_lock(&kvm->mmu_lock);
 	flush = slot_handle_level(kvm, memslot, slot_rmap_write_protect,
-				  start_level, PT_MAX_HUGEPAGE_LEVEL, false);
+				  start_level, KVM_MAX_HUGEPAGE_LEVEL, false);
 	spin_unlock(&kvm->mmu_lock);
 
 	/*