Commit cb953129 authored by David Matlack, committed by Paolo Bonzini

kvm: add halt-polling cpu usage stats

Two new stats for exposing halt-polling cpu usage:
halt_poll_success_ns
halt_poll_fail_ns

The sum of these two stats is the total cpu time spent polling. "success"
means the VCPU polled until a virtual interrupt was delivered. "fail"
means the VCPU had to schedule out (either because the maximum poll time
was reached or it needed to yield the CPU).
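
As a concrete illustration of that accounting, here is a minimal userspace sketch (not part of this patch) that reads the two counters for one VM and derives the total polling time and the successful fraction. The per-VM stats directory layout under /sys/kernel/debug/kvm/ is an assumption here, so the directory is passed in explicitly:

/*
 * Hypothetical consumer of the new stats: read halt_poll_success_ns and
 * halt_poll_fail_ns from a VM's debugfs stats directory (path passed as
 * argv[1]) and report total polling time plus the successful fraction.
 */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

static uint64_t read_stat(const char *dir, const char *name)
{
        char path[512];
        uint64_t val = 0;
        FILE *f;

        snprintf(path, sizeof(path), "%s/%s", dir, name);
        f = fopen(path, "r");
        if (!f)
                return 0;       /* treat a missing/unreadable stat as zero */
        if (fscanf(f, "%" SCNu64, &val) != 1)
                val = 0;
        fclose(f);
        return val;
}

int main(int argc, char **argv)
{
        uint64_t success_ns, fail_ns, total_ns;

        if (argc != 2) {
                fprintf(stderr, "usage: %s <kvm debugfs VM stats dir>\n", argv[0]);
                return 1;
        }

        success_ns = read_stat(argv[1], "halt_poll_success_ns");
        fail_ns = read_stat(argv[1], "halt_poll_fail_ns");
        total_ns = success_ns + fail_ns;        /* total cpu time spent polling */

        printf("polling total: %" PRIu64 " ns (%.1f%% successful)\n",
               total_ns,
               total_ns ? 100.0 * success_ns / total_ns : 0.0);
        return 0;
}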

To avoid touching every arch's kvm_vcpu_stat struct, only update and
export halt-polling cpu usage stats if we're on x86.

Exporting cpu usage as a u64 in nanoseconds means the counters will overflow
only after roughly 500 years of accumulated polling, which seems reasonably large.
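
For reference, the wrap-around point works out as follows (a quick standalone back-of-the-envelope sketch, not kernel code from this patch):

/*
 * A u64 nanosecond counter wraps after 2^64 ns, i.e. roughly 584 years
 * of accumulated polling time, consistent with the "~500 years" claim.
 */
#include <stdio.h>

int main(void)
{
        const double wrap_ns = 18446744073709551616.0;          /* 2^64 */
        const double ns_per_year = 1e9 * 60 * 60 * 24 * 365.25; /* ~3.16e16 */

        printf("u64 ns counter wraps after ~%.0f years\n",
               wrap_ns / ns_per_year);
        return 0;
}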
Signed-off-by: David Matlack <dmatlack@google.com>
Signed-off-by: Jon Cargille <jcargill@google.com>
Reviewed-by: Jim Mattson <jmattson@google.com>

Message-Id: <20200508182240.68440-1-jcargill@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 93dff2fe
arch/arm64/include/asm/kvm_host.h
@@ -415,6 +415,8 @@ struct kvm_vm_stat {
 struct kvm_vcpu_stat {
         u64 halt_successful_poll;
         u64 halt_attempted_poll;
+        u64 halt_poll_success_ns;
+        u64 halt_poll_fail_ns;
         u64 halt_poll_invalid;
         u64 halt_wakeup;
         u64 hvc_exit_stat;
arch/arm64/kvm/guest.c
@@ -40,6 +40,8 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
         VCPU_STAT("mmio_exit_user", mmio_exit_user),
         VCPU_STAT("mmio_exit_kernel", mmio_exit_kernel),
         VCPU_STAT("exits", exits),
+        VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
+        VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
         { NULL }
 };
arch/mips/include/asm/kvm_host.h
@@ -174,6 +174,8 @@ struct kvm_vcpu_stat {
 #endif
         u64 halt_successful_poll;
         u64 halt_attempted_poll;
+        u64 halt_poll_success_ns;
+        u64 halt_poll_fail_ns;
         u64 halt_poll_invalid;
         u64 halt_wakeup;
 };
arch/mips/kvm/mips.c
@@ -72,6 +72,8 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
         VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
         VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
         VCPU_STAT("halt_wakeup", halt_wakeup),
+        VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
+        VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
         {NULL}
 };
arch/powerpc/kvm/booke.c
@@ -54,6 +54,8 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
         VCPU_STAT("halt_wakeup", halt_wakeup),
         VCPU_STAT("doorbell", dbell_exits),
         VCPU_STAT("guest doorbell", gdbell_exits),
+        VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
+        VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
         VM_STAT("remote_tlb_flush", remote_tlb_flush),
         { NULL }
 };
arch/s390/include/asm/kvm_host.h
@@ -375,6 +375,8 @@ struct kvm_vcpu_stat {
         u64 halt_poll_invalid;
         u64 halt_no_poll_steal;
         u64 halt_wakeup;
+        u64 halt_poll_success_ns;
+        u64 halt_poll_fail_ns;
         u64 instruction_lctl;
         u64 instruction_lctlg;
         u64 instruction_stctl;
arch/s390/kvm/kvm-s390.c
@@ -75,6 +75,8 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
         VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
         VCPU_STAT("halt_no_poll_steal", halt_no_poll_steal),
         VCPU_STAT("halt_wakeup", halt_wakeup),
+        VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
+        VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
         VCPU_STAT("instruction_lctlg", instruction_lctlg),
         VCPU_STAT("instruction_lctl", instruction_lctl),
         VCPU_STAT("instruction_stctl", instruction_stctl),
arch/x86/include/asm/kvm_host.h
@@ -1031,6 +1031,8 @@ struct kvm_vcpu_stat {
         u64 irq_injections;
         u64 nmi_injections;
         u64 req_event;
+        u64 halt_poll_success_ns;
+        u64 halt_poll_fail_ns;
 };
 
 struct x86_instruction_info;
arch/x86/kvm/x86.c
@@ -217,6 +217,8 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
         VCPU_STAT("nmi_injections", nmi_injections),
         VCPU_STAT("req_event", req_event),
         VCPU_STAT("l1d_flush", l1d_flush),
+        VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
+        VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
         VM_STAT("mmu_shadow_zapped", mmu_shadow_zapped),
         VM_STAT("mmu_pte_write", mmu_pte_write),
         VM_STAT("mmu_pte_updated", mmu_pte_updated),
virt/kvm/kvm_main.c
@@ -2667,18 +2667,27 @@ static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
         return ret;
 }
 
+static inline void
+update_halt_poll_stats(struct kvm_vcpu *vcpu, u64 poll_ns, bool waited)
+{
+        if (waited)
+                vcpu->stat.halt_poll_fail_ns += poll_ns;
+        else
+                vcpu->stat.halt_poll_success_ns += poll_ns;
+}
+
 /*
  * The vCPU has executed a HLT instruction with in-kernel mode enabled.
  */
 void kvm_vcpu_block(struct kvm_vcpu *vcpu)
 {
-        ktime_t start, cur;
+        ktime_t start, cur, poll_end;
         bool waited = false;
         u64 block_ns;
 
         kvm_arch_vcpu_blocking(vcpu);
 
-        start = cur = ktime_get();
+        start = cur = poll_end = ktime_get();
         if (vcpu->halt_poll_ns && !kvm_arch_no_poll(vcpu)) {
                 ktime_t stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns);
 
@@ -2694,7 +2703,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
                                 ++vcpu->stat.halt_poll_invalid;
                                 goto out;
                         }
-                        cur = ktime_get();
+                        poll_end = cur = ktime_get();
                 } while (single_task_running() && ktime_before(cur, stop));
         }
 
@@ -2714,6 +2723,9 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
         kvm_arch_vcpu_unblocking(vcpu);
         block_ns = ktime_to_ns(cur) - ktime_to_ns(start);
 
+        update_halt_poll_stats(
+                        vcpu, ktime_to_ns(ktime_sub(poll_end, start)), waited);
+
         if (!kvm_arch_no_poll(vcpu)) {
                 if (!vcpu_valid_wakeup(vcpu)) {
                         shrink_halt_poll_ns(vcpu);