Commit 71f51d2c authored by Mingwei Zhang, committed by Paolo Bonzini

KVM: x86/mmu: Add detailed page size stats

Existing KVM code tracks the number of large pages regardless of their
size. Therefore, once 1GB (or larger) pages are in use, the information
becomes less useful because lpages counts a mix of 1G and 2M pages.

So remove lpages, since it is easy for user space to aggregate the info
itself, and instead provide comprehensive page stats for all sizes from
4K to 512G.
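
As a rough, hand-written illustration (not part of the patch): user space
can rebuild the old lpages aggregate from the new per-size counters, for
example by reading the per-VM debugfs stat files named after the entries
added here. The debugfs directory and the read_stat() helper below are
made-up placeholders, a sketch rather than a supported interface.

#include <stdio.h>

static long read_stat(const char *dir, const char *name)
{
	char path[256];
	long val = 0;
	FILE *f;

	snprintf(path, sizeof(path), "%s/%s", dir, name);
	f = fopen(path, "r");
	if (f) {
		if (fscanf(f, "%ld", &val) != 1)
			val = 0;
		fclose(f);
	}
	return val;
}

int main(void)
{
	/* Hypothetical per-VM debugfs directory ("<pid>-<vm fd>"). */
	const char *dir = "/sys/kernel/debug/kvm/1234-10";

	/* The old "lpages" value is just the sum of the non-4K counters. */
	long lpages = read_stat(dir, "pages_2m") + read_stat(dir, "pages_1g");

	printf("lpages = %ld (pages_4k = %ld)\n",
	       lpages, read_stat(dir, "pages_4k"));
	return 0;
}
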
Suggested-by: Ben Gardon <bgardon@google.com>
Reviewed-by: David Matlack <dmatlack@google.com>
Reviewed-by: Ben Gardon <bgardon@google.com>
Signed-off-by: Mingwei Zhang <mizhang@google.com>
Cc: Jing Zhang <jingzhangos@google.com>
Cc: David Matlack <dmatlack@google.com>
Cc: Sean Christopherson <seanjc@google.com>
Message-Id: <20210803044607.599629-4-mizhang@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 088acd23
@@ -1214,7 +1214,14 @@ struct kvm_vm_stat {
u64 mmu_recycled;
u64 mmu_cache_miss;
u64 mmu_unsync;
u64 lpages;
union {
struct {
atomic64_t pages_4k;
atomic64_t pages_2m;
atomic64_t pages_1g;
};
atomic64_t pages[KVM_NR_PAGE_SIZES];
};
u64 nx_lpage_splits;
u64 max_mmu_page_hash_collisions;
u64 max_mmu_rmap_size;
@@ -261,4 +261,8 @@ kvm_mmu_slot_lpages(struct kvm_memory_slot *slot, int level)
return __kvm_mmu_slot_lpages(slot, slot->npages, level);
}
static inline void kvm_update_page_stats(struct kvm *kvm, int level, int count)
{
atomic64_add(count, &kvm->stat.pages[level - 1]);
}
#endif
@@ -604,10 +604,11 @@ static bool mmu_spte_update(u64 *sptep, u64 new_spte)
* state bits, it is used to clear the last level sptep.
* Returns the old PTE.
*/
static u64 mmu_spte_clear_track_bits(u64 *sptep)
static int mmu_spte_clear_track_bits(struct kvm *kvm, u64 *sptep)
{
kvm_pfn_t pfn;
u64 old_spte = *sptep;
int level = sptep_to_sp(sptep)->role.level;
if (!spte_has_volatile_bits(old_spte))
__update_clear_spte_fast(sptep, 0ull);
@@ -617,6 +618,8 @@ static u64 mmu_spte_clear_track_bits(u64 *sptep)
if (!is_shadow_present_pte(old_spte))
return old_spte;
kvm_update_page_stats(kvm, level, -1);
pfn = spte_to_pfn(old_spte);
/*
@@ -1001,14 +1004,15 @@ static void __pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head)
}
}
static void pte_list_remove(struct kvm_rmap_head *rmap_head, u64 *sptep)
static void pte_list_remove(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
u64 *sptep)
{
mmu_spte_clear_track_bits(sptep);
mmu_spte_clear_track_bits(kvm, sptep);
__pte_list_remove(sptep, rmap_head);
}
/* Return true if rmap existed, false otherwise */
static bool pte_list_destroy(struct kvm_rmap_head *rmap_head)
static bool pte_list_destroy(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
{
struct pte_list_desc *desc, *next;
int i;
@@ -1017,7 +1021,7 @@ static bool pte_list_destroy(struct kvm_rmap_head *rmap_head)
return false;
if (!(rmap_head->val & 1)) {
mmu_spte_clear_track_bits((u64 *)rmap_head->val);
mmu_spte_clear_track_bits(kvm, (u64 *)rmap_head->val);
goto out;
}
@@ -1025,7 +1029,7 @@ static bool pte_list_destroy(struct kvm_rmap_head *rmap_head)
for (; desc; desc = next) {
for (i = 0; i < desc->spte_count; i++)
mmu_spte_clear_track_bits(desc->sptes[i]);
mmu_spte_clear_track_bits(kvm, desc->sptes[i]);
next = desc->more;
mmu_free_pte_list_desc(desc);
}
@@ -1188,7 +1192,7 @@ static u64 *rmap_get_next(struct rmap_iterator *iter)
static void drop_spte(struct kvm *kvm, u64 *sptep)
{
u64 old_spte = mmu_spte_clear_track_bits(sptep);
u64 old_spte = mmu_spte_clear_track_bits(kvm, sptep);
if (is_shadow_present_pte(old_spte))
rmap_remove(kvm, sptep);
@@ -1200,7 +1204,6 @@ static bool __drop_large_spte(struct kvm *kvm, u64 *sptep)
if (is_large_pte(*sptep)) {
WARN_ON(sptep_to_sp(sptep)->role.level == PG_LEVEL_4K);
drop_spte(kvm, sptep);
--kvm->stat.lpages;
return true;
}
@@ -1450,7 +1453,7 @@ static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
const struct kvm_memory_slot *slot)
{
return pte_list_destroy(rmap_head);
return pte_list_destroy(kvm, rmap_head);
}
static bool kvm_unmap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
@@ -1481,13 +1484,13 @@ static bool kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
need_flush = 1;
if (pte_write(pte)) {
pte_list_remove(rmap_head, sptep);
pte_list_remove(kvm, rmap_head, sptep);
goto restart;
} else {
new_spte = kvm_mmu_changed_pte_notifier_make_spte(
*sptep, new_pfn);
mmu_spte_clear_track_bits(sptep);
mmu_spte_clear_track_bits(kvm, sptep);
mmu_spte_set(sptep, new_spte);
}
}
@@ -2292,8 +2295,6 @@ static int mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
if (is_shadow_present_pte(pte)) {
if (is_last_spte(pte, sp->role.level)) {
drop_spte(kvm, spte);
if (is_large_pte(pte))
--kvm->stat.lpages;
} else {
child = to_shadow_page(pte & PT64_BASE_ADDR_MASK);
drop_parent_pte(child, spte);
@@ -2778,8 +2779,7 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
trace_kvm_mmu_set_spte(level, gfn, sptep);
if (!was_rmapped) {
if (is_large_pte(*sptep))
++vcpu->kvm->stat.lpages;
kvm_update_page_stats(vcpu->kvm, level, 1);
rmap_count = rmap_add(vcpu, sptep, gfn);
if (rmap_count > RMAP_RECYCLE_THRESHOLD)
rmap_recycle(vcpu, sptep, gfn);
@@ -5809,7 +5809,7 @@ static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
if (sp->role.direct && !kvm_is_reserved_pfn(pfn) &&
sp->role.level < kvm_mmu_max_mapping_level(kvm, slot, sp->gfn,
pfn, PG_LEVEL_NUM)) {
pte_list_remove(rmap_head, sptep);
pte_list_remove(kvm, rmap_head, sptep);
if (kvm_available_flush_tlb_with_range())
kvm_flush_remote_tlbs_with_address(kvm, sp->gfn,
@@ -412,7 +412,6 @@ static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
bool was_leaf = was_present && is_last_spte(old_spte, level);
bool is_leaf = is_present && is_last_spte(new_spte, level);
bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);
bool was_large, is_large;
WARN_ON(level > PT64_ROOT_MAX_LEVEL);
WARN_ON(level < PG_LEVEL_4K);
@@ -471,18 +470,8 @@ static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
return;
}
/*
* Update large page stats if a large page is being zapped, created, or
* is replacing an existing shadow page.
*/
was_large = was_leaf && is_large_pte(old_spte);
is_large = is_leaf && is_large_pte(new_spte);
if (was_large != is_large) {
if (was_large)
atomic64_sub(1, (atomic64_t *)&kvm->stat.lpages);
else
atomic64_add(1, (atomic64_t *)&kvm->stat.lpages);
}
if (is_leaf != was_leaf)
kvm_update_page_stats(kvm, level, is_leaf ? 1 : -1);
if (was_leaf && is_dirty_spte(old_spte) &&
(!is_present || !is_dirty_spte(new_spte) || pfn_changed))
@@ -233,7 +233,9 @@ const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
STATS_DESC_COUNTER(VM, mmu_recycled),
STATS_DESC_COUNTER(VM, mmu_cache_miss),
STATS_DESC_ICOUNTER(VM, mmu_unsync),
STATS_DESC_ICOUNTER(VM, lpages),
STATS_DESC_ICOUNTER(VM, pages_4k),
STATS_DESC_ICOUNTER(VM, pages_2m),
STATS_DESC_ICOUNTER(VM, pages_1g),
STATS_DESC_ICOUNTER(VM, nx_lpage_splits),
STATS_DESC_PCOUNTER(VM, max_mmu_rmap_size),
STATS_DESC_PCOUNTER(VM, max_mmu_page_hash_collisions)