Commit e1f288d2, authored by Gautam Menghani, committed by Michael Ellerman

KVM: PPC: Book3S HV nestedv2: Add support for reading VPA counters for pseries guests

PAPR hypervisor has introduced three new counters in the VPA area of
LPAR CPUs for KVM L2 guest (see [1] for terminology) observability - two
for context switches from host to guest and vice versa, and one counter
for getting the total time spent inside the KVM guest. Add a tracepoint
that enables reading the counters for use by ftrace/perf. Note that this
tracepoint is only available for the nestedv2 API (i.e., KVM on PowerVM).

[1] Terminology:
a. L1 refers to the VM (LPAR) booted on top of PAPR hypervisor
b. L2 refers to the KVM guest booted on top of L1.
Reviewed-by: Nicholas Piggin <npiggin@gmail.com>
Acked-by: Naveen N Rao <naveen@kernel.org>
Signed-off-by: Vaibhav Jain <vaibhav@linux.ibm.com>
Signed-off-by: Gautam Menghani <gautam@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/20240520175742.196329-1-gautam@linux.ibm.com
parent c3f38fa6
...@@ -684,6 +684,11 @@ int kvmhv_nestedv2_set_ptbl_entry(unsigned long lpid, u64 dw0, u64 dw1); ...@@ -684,6 +684,11 @@ int kvmhv_nestedv2_set_ptbl_entry(unsigned long lpid, u64 dw0, u64 dw1);
int kvmhv_nestedv2_parse_output(struct kvm_vcpu *vcpu); int kvmhv_nestedv2_parse_output(struct kvm_vcpu *vcpu);
int kvmhv_nestedv2_set_vpa(struct kvm_vcpu *vcpu, unsigned long vpa); int kvmhv_nestedv2_set_vpa(struct kvm_vcpu *vcpu, unsigned long vpa);
/* Tracepoint reg/unreg hooks: turn the per-CPU VPA L2 counters on/off */
/* (note: "kmvhv" spelling is the established name used by the tracepoint) */
int kmvhv_counters_tracepoint_regfunc(void);
void kmvhv_counters_tracepoint_unregfunc(void);
/* Non-zero when running under PAPR (LPAR) and L2 counters are enabled */
int kvmhv_get_l2_counters_status(void);
/* Enable/disable the L2 counters in @cpu's VPA (no-op when not an LPAR) */
void kvmhv_set_l2_counters_status(int cpu, bool status);
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */ #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
#endif /* __ASM_KVM_BOOK3S_64_H__ */ #endif /* __ASM_KVM_BOOK3S_64_H__ */
...@@ -62,7 +62,8 @@ struct lppaca { ...@@ -62,7 +62,8 @@ struct lppaca {
u8 donate_dedicated_cpu; /* Donate dedicated CPU cycles */ u8 donate_dedicated_cpu; /* Donate dedicated CPU cycles */
u8 fpregs_in_use; u8 fpregs_in_use;
u8 pmcregs_in_use; u8 pmcregs_in_use;
u8 reserved8[28]; u8 l2_counters_enable; /* Enable usage of counters for KVM guest */
u8 reserved8[27];
__be64 wait_state_cycles; /* Wait cycles for this proc */ __be64 wait_state_cycles; /* Wait cycles for this proc */
u8 reserved9[28]; u8 reserved9[28];
__be16 slb_count; /* # of SLBs to maintain */ __be16 slb_count; /* # of SLBs to maintain */
...@@ -92,9 +93,13 @@ struct lppaca { ...@@ -92,9 +93,13 @@ struct lppaca {
/* cacheline 4-5 */ /* cacheline 4-5 */
__be32 page_ins; /* CMO Hint - # page ins by OS */ __be32 page_ins; /* CMO Hint - # page ins by OS */
u8 reserved12[148]; u8 reserved12[28];
volatile __be64 l1_to_l2_cs_tb;
volatile __be64 l2_to_l1_cs_tb;
volatile __be64 l2_runtime_tb;
u8 reserved13[96];
volatile __be64 dtl_idx; /* Dispatch Trace Log head index */ volatile __be64 dtl_idx; /* Dispatch Trace Log head index */
u8 reserved13[96]; u8 reserved14[96];
} ____cacheline_aligned; } ____cacheline_aligned;
#define lppaca_of(cpu) (*paca_ptrs[cpu]->lppaca_ptr) #define lppaca_of(cpu) (*paca_ptrs[cpu]->lppaca_ptr)
......
...@@ -4108,6 +4108,77 @@ static void vcpu_vpa_increment_dispatch(struct kvm_vcpu *vcpu) ...@@ -4108,6 +4108,77 @@ static void vcpu_vpa_increment_dispatch(struct kvm_vcpu *vcpu)
} }
} }
/* Helper functions for reading L2's stats from L1's VPA */
#ifdef CONFIG_PPC_PSERIES
static DEFINE_PER_CPU(u64, l1_to_l2_cs);
static DEFINE_PER_CPU(u64, l2_to_l1_cs);
static DEFINE_PER_CPU(u64, l2_runtime_agg);
/*
 * Report whether the hypervisor-maintained L2 counters in this CPU's
 * VPA are currently enabled. Returns 0 when not running as a PAPR
 * guest (LPAR), since there is no VPA to consult in that case.
 */
int kvmhv_get_l2_counters_status(void)
{
	if (!firmware_has_feature(FW_FEATURE_LPAR))
		return 0;

	return get_lppaca()->l2_counters_enable ? 1 : 0;
}
/*
 * Enable or disable the L2 observability counters in @cpu's VPA.
 * Writing the lppaca field is only meaningful (and only safe) when
 * running as a PAPR guest, so bail out early otherwise.
 */
void kvmhv_set_l2_counters_status(int cpu, bool status)
{
	if (!firmware_has_feature(FW_FEATURE_LPAR))
		return;

	lppaca_of(cpu).l2_counters_enable = status ? 1 : 0;
}
int kmvhv_counters_tracepoint_regfunc(void)
{
int cpu;
for_each_present_cpu(cpu) {
kvmhv_set_l2_counters_status(cpu, true);
}
return 0;
}
void kmvhv_counters_tracepoint_unregfunc(void)
{
int cpu;
for_each_present_cpu(cpu) {
kvmhv_set_l2_counters_status(cpu, false);
}
}
static void do_trace_nested_cs_time(struct kvm_vcpu *vcpu)
{
struct lppaca *lp = get_lppaca();
u64 l1_to_l2_ns, l2_to_l1_ns, l2_runtime_ns;
u64 *l1_to_l2_cs_ptr = this_cpu_ptr(&l1_to_l2_cs);
u64 *l2_to_l1_cs_ptr = this_cpu_ptr(&l2_to_l1_cs);
u64 *l2_runtime_agg_ptr = this_cpu_ptr(&l2_runtime_agg);
l1_to_l2_ns = tb_to_ns(be64_to_cpu(lp->l1_to_l2_cs_tb));
l2_to_l1_ns = tb_to_ns(be64_to_cpu(lp->l2_to_l1_cs_tb));
l2_runtime_ns = tb_to_ns(be64_to_cpu(lp->l2_runtime_tb));
trace_kvmppc_vcpu_stats(vcpu, l1_to_l2_ns - *l1_to_l2_cs_ptr,
l2_to_l1_ns - *l2_to_l1_cs_ptr,
l2_runtime_ns - *l2_runtime_agg_ptr);
*l1_to_l2_cs_ptr = l1_to_l2_ns;
*l2_to_l1_cs_ptr = l2_to_l1_ns;
*l2_runtime_agg_ptr = l2_runtime_ns;
}
#else
/* !CONFIG_PPC_PSERIES: no PAPR VPA, so the L2 counters never exist */
int kvmhv_get_l2_counters_status(void)
{
	return 0;
}
/* Stub: never reached because the status check above always fails */
static void do_trace_nested_cs_time(struct kvm_vcpu *vcpu)
{
}
#endif
static int kvmhv_vcpu_entry_nestedv2(struct kvm_vcpu *vcpu, u64 time_limit, static int kvmhv_vcpu_entry_nestedv2(struct kvm_vcpu *vcpu, u64 time_limit,
unsigned long lpcr, u64 *tb) unsigned long lpcr, u64 *tb)
{ {
...@@ -4156,6 +4227,10 @@ static int kvmhv_vcpu_entry_nestedv2(struct kvm_vcpu *vcpu, u64 time_limit, ...@@ -4156,6 +4227,10 @@ static int kvmhv_vcpu_entry_nestedv2(struct kvm_vcpu *vcpu, u64 time_limit,
timer_rearm_host_dec(*tb); timer_rearm_host_dec(*tb);
/* Record context switch and guest_run_time data */
if (kvmhv_get_l2_counters_status())
do_trace_nested_cs_time(vcpu);
return trap; return trap;
} }
......
...@@ -512,6 +512,35 @@ TRACE_EVENT(kvmppc_run_vcpu_exit, ...@@ -512,6 +512,35 @@ TRACE_EVENT(kvmppc_run_vcpu_exit,
__entry->vcpu_id, __entry->exit, __entry->ret) __entry->vcpu_id, __entry->exit, __entry->ret)
); );
#ifdef CONFIG_PPC_PSERIES
/*
 * kvmppc_vcpu_stats: per-vcpu-exit deltas of the VPA L2 counters, in ns.
 * _FN_COND variant: the reg/unreg callbacks enable/disable the counters
 * in the VPA only while a probe is attached, and the event is skipped
 * entirely when all three deltas are zero.
 */
TRACE_EVENT_FN_COND(kvmppc_vcpu_stats,
	TP_PROTO(struct kvm_vcpu *vcpu, u64 l1_to_l2_cs, u64 l2_to_l1_cs, u64 l2_runtime),

	TP_ARGS(vcpu, l1_to_l2_cs, l2_to_l1_cs, l2_runtime),

	/* Suppress empty samples (e.g. counters just enabled) */
	TP_CONDITION(l1_to_l2_cs || l2_to_l1_cs || l2_runtime),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(u64, l1_to_l2_cs)
		__field(u64, l2_to_l1_cs)
		__field(u64, l2_runtime)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu->vcpu_id;
		__entry->l1_to_l2_cs = l1_to_l2_cs;
		__entry->l2_to_l1_cs = l2_to_l1_cs;
		__entry->l2_runtime = l2_runtime;
	),

	TP_printk("VCPU %d: l1_to_l2_cs_time=%llu ns l2_to_l1_cs_time=%llu ns l2_runtime=%llu ns",
		__entry->vcpu_id, __entry->l1_to_l2_cs,
		__entry->l2_to_l1_cs, __entry->l2_runtime),

	/* Registration hooks toggle the counters in every CPU's VPA */
	kmvhv_counters_tracepoint_regfunc, kmvhv_counters_tracepoint_unregfunc
);
#endif
#endif /* _TRACE_KVM_HV_H */ #endif /* _TRACE_KVM_HV_H */
/* This part must be outside protection */ /* This part must be outside protection */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment