Commit 140139c5 authored by Paolo Bonzini


Merge tag 'kvm-s390-next-6.7-1' of https://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into HEAD

- nested page table management performance counters
parents 957eedc7 70fea301
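For context on where these counters become visible: fields declared with STATS_DESC_COUNTER(VM, ...) are picked up by KVM's generic statistics code, so they can be read through the KVM_GET_STATS_FD binary interface or, when debugfs is enabled, as per-VM files under /sys/kernel/debug/kvm/. The snippet below is only an illustrative sketch and is not part of this merge; the VM directory name "12345-11" (<pid>-<fd>) is a placeholder that has to match a running guest.

/*
 * Illustrative only, not part of this commit: read one of the new
 * per-VM gmap shadow counters from KVM's per-VM debugfs directory.
 * The "12345-11" (<pid>-<fd>) component is a placeholder.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/debug/kvm/12345-11/gmap_shadow_create", "r");
	unsigned long long val;

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%llu", &val) == 1)
		printf("gmap_shadow_create = %llu\n", val);
	fclose(f);
	return 0;
}

Reading such a file repeatedly while a nested guest runs should show gmap_shadow_create growing only when a new shadow gmap has to be built, while gmap_shadow_reuse counts the cheaper path where an existing shadow is reused.
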
@@ -777,6 +777,13 @@ struct kvm_vm_stat {
 	u64 inject_service_signal;
 	u64 inject_virtio;
 	u64 aen_forward;
+	u64 gmap_shadow_create;
+	u64 gmap_shadow_reuse;
+	u64 gmap_shadow_r1_entry;
+	u64 gmap_shadow_r2_entry;
+	u64 gmap_shadow_r3_entry;
+	u64 gmap_shadow_sg_entry;
+	u64 gmap_shadow_pg_entry;
 };
 
 struct kvm_arch_memory_slot {

@@ -1382,6 +1382,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
 				  unsigned long *pgt, int *dat_protection,
 				  int *fake)
 {
+	struct kvm *kvm;
 	struct gmap *parent;
 	union asce asce;
 	union vaddress vaddr;
@@ -1390,6 +1391,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
 
 	*fake = 0;
 	*dat_protection = 0;
+	kvm = sg->private;
 	parent = sg->parent;
 	vaddr.addr = saddr;
 	asce.val = sg->orig_asce;
@@ -1450,6 +1452,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
 		rc = gmap_shadow_r2t(sg, saddr, rfte.val, *fake);
 		if (rc)
 			return rc;
+		kvm->stat.gmap_shadow_r1_entry++;
 	}
 		fallthrough;
 	case ASCE_TYPE_REGION2: {
@@ -1478,6 +1481,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
 		rc = gmap_shadow_r3t(sg, saddr, rste.val, *fake);
 		if (rc)
 			return rc;
+		kvm->stat.gmap_shadow_r2_entry++;
 	}
 		fallthrough;
 	case ASCE_TYPE_REGION3: {
@@ -1515,6 +1519,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
 		rc = gmap_shadow_sgt(sg, saddr, rtte.val, *fake);
 		if (rc)
 			return rc;
+		kvm->stat.gmap_shadow_r3_entry++;
 	}
 		fallthrough;
 	case ASCE_TYPE_SEGMENT: {
@@ -1548,6 +1553,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
 		rc = gmap_shadow_pgt(sg, saddr, ste.val, *fake);
 		if (rc)
 			return rc;
+		kvm->stat.gmap_shadow_sg_entry++;
 	}
 	}
 	/* Return the parent address of the page table */
@@ -1618,6 +1624,7 @@ int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
 	pte.p |= dat_protection;
 	if (!rc)
 		rc = gmap_shadow_page(sg, saddr, __pte(pte.val));
+	vcpu->kvm->stat.gmap_shadow_pg_entry++;
 	ipte_unlock(vcpu->kvm);
 	mmap_read_unlock(sg->mm);
 	return rc;

@@ -66,7 +66,14 @@ const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
 	STATS_DESC_COUNTER(VM, inject_pfault_done),
 	STATS_DESC_COUNTER(VM, inject_service_signal),
 	STATS_DESC_COUNTER(VM, inject_virtio),
-	STATS_DESC_COUNTER(VM, aen_forward)
+	STATS_DESC_COUNTER(VM, aen_forward),
+	STATS_DESC_COUNTER(VM, gmap_shadow_reuse),
+	STATS_DESC_COUNTER(VM, gmap_shadow_create),
+	STATS_DESC_COUNTER(VM, gmap_shadow_r1_entry),
+	STATS_DESC_COUNTER(VM, gmap_shadow_r2_entry),
+	STATS_DESC_COUNTER(VM, gmap_shadow_r3_entry),
+	STATS_DESC_COUNTER(VM, gmap_shadow_sg_entry),
+	STATS_DESC_COUNTER(VM, gmap_shadow_pg_entry),
 };
 
 const struct kvm_stats_header kvm_vm_stats_header = {
@@ -4053,6 +4060,8 @@ static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
 	unsigned long prefix;
 	unsigned long i;
 
+	trace_kvm_s390_gmap_notifier(start, end, gmap_is_shadow(gmap));
+
 	if (gmap_is_shadow(gmap))
 		return;
 	if (start >= 1UL << 31)

@@ -333,6 +333,29 @@ TRACE_EVENT(kvm_s390_airq_suppressed,
 		      __entry->id, __entry->isc)
 	);
 
+/*
+ * Trace point for gmap notifier calls.
+ */
+TRACE_EVENT(kvm_s390_gmap_notifier,
+	    TP_PROTO(unsigned long start, unsigned long end, unsigned int shadow),
+	    TP_ARGS(start, end, shadow),
+
+	    TP_STRUCT__entry(
+		    __field(unsigned long, start)
+		    __field(unsigned long, end)
+		    __field(unsigned int, shadow)
+		    ),
+
+	    TP_fast_assign(
+		    __entry->start = start;
+		    __entry->end = end;
+		    __entry->shadow = shadow;
+		    ),
+
+	    TP_printk("gmap notified (start:0x%lx end:0x%lx shadow:%d)",
+		      __entry->start, __entry->end, __entry->shadow)
+	);
+
 #endif /* _TRACE_KVMS390_H */

@@ -1214,8 +1214,10 @@ static int acquire_gmap_shadow(struct kvm_vcpu *vcpu,
 	 * we're holding has been unshadowed. If the gmap is still valid,
 	 * we can safely reuse it.
 	 */
-	if (vsie_page->gmap && gmap_shadow_valid(vsie_page->gmap, asce, edat))
+	if (vsie_page->gmap && gmap_shadow_valid(vsie_page->gmap, asce, edat)) {
+		vcpu->kvm->stat.gmap_shadow_reuse++;
 		return 0;
+	}
 
 	/* release the old shadow - if any, and mark the prefix as unmapped */
 	release_gmap_shadow(vsie_page);
@@ -1223,6 +1225,7 @@ static int acquire_gmap_shadow(struct kvm_vcpu *vcpu,
 	if (IS_ERR(gmap))
 		return PTR_ERR(gmap);
 	gmap->private = vcpu->kvm;
+	vcpu->kvm->stat.gmap_shadow_create++;
 	WRITE_ONCE(vsie_page->gmap, gmap);
 	return 0;
 }