Commit 25462f7f authored by Wei Huang, committed by Paolo Bonzini

KVM: x86/vPMU: Define kvm_pmu_ops to support vPMU function dispatch

This patch defines a new function pointer struct (kvm_pmu_ops) to
support vPMU for both Intel and AMD. The function pointers defined in
this new struct will be linked with Intel and AMD functions later. In the
meantime, the struct that maps from event_sel bits to PERF_TYPE_HARDWARE
events is renamed and moved from Intel-specific code to kvm_host.h as a
common struct.
Reviewed-by: Joerg Roedel <jroedel@suse.de>
Tested-by: Joerg Roedel <jroedel@suse.de>
Signed-off-by: Wei Huang <wei@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 41aac14a
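The change is plain C function-pointer dispatch: the common PMU code stops calling Intel-only helpers directly and instead calls through a per-vendor ops table hung off kvm_x86_ops. The sketch below is illustrative only (simplified stand-in names such as demo_pmu_ops and demo_vcpu, not the kernel's types); it shows the pattern the patch applies:

/* Minimal sketch of the ops-table dispatch idea; stand-in types, not kernel code. */
#include <stdio.h>

struct demo_vcpu { int id; };                 /* stand-in for struct kvm_vcpu */

struct demo_pmu_ops {                         /* stand-in for struct kvm_pmu_ops */
	void (*refresh)(struct demo_vcpu *vcpu);
	int (*get_msr)(struct demo_vcpu *vcpu, unsigned msr, unsigned long long *data);
};

static void vendor_refresh(struct demo_vcpu *vcpu)
{
	printf("refresh vcpu %d\n", vcpu->id);
}

static int vendor_get_msr(struct demo_vcpu *vcpu, unsigned msr, unsigned long long *data)
{
	(void)vcpu;
	(void)msr;
	*data = 0;
	return 0;
}

/* Each vendor module exports exactly one ops table... */
static const struct demo_pmu_ops vendor_pmu_ops = {
	.refresh = vendor_refresh,
	.get_msr = vendor_get_msr,
};

/* ...and the common code only ever calls through the pointer it was given. */
static const struct demo_pmu_ops *pmu_ops = &vendor_pmu_ops;

int main(void)
{
	struct demo_vcpu vcpu = { .id = 0 };
	unsigned long long val;

	pmu_ops->refresh(&vcpu);
	pmu_ops->get_msr(&vcpu, 0xc1, &val);
	printf("msr value %llu\n", val);
	return 0;
}

In the patch itself, vmx.c and svm.c fill the new pmu_ops member of their kvm_x86_ops tables with intel_pmu_ops and amd_pmu_ops respectively, and the common pmu.c only ever calls kvm_x86_ops->pmu_ops->...().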
@@ -336,6 +336,8 @@ struct kvm_pmu {
u64 reprogram_pmi;
};

struct kvm_pmu_ops;

enum {
KVM_DEBUGREG_BP_ENABLED = 1,
KVM_DEBUGREG_WONT_EXIT = 2,
@@ -854,6 +856,8 @@ struct kvm_x86_ops {
void (*enable_log_dirty_pt_masked)(struct kvm *kvm,
struct kvm_memory_slot *slot,
gfn_t offset, unsigned long mask);

/* pmu operations of sub-arch */
const struct kvm_pmu_ops *pmu_ops;
};

struct kvm_arch_async_pf {
......
@@ -14,8 +14,8 @@ kvm-$(CONFIG_KVM_ASYNC_PF) += $(KVM)/async_pf.o
kvm-y += x86.o mmu.o emulate.o i8259.o irq.o lapic.o \
i8254.o ioapic.o irq_comm.o cpuid.o pmu.o mtrr.o
kvm-$(CONFIG_KVM_DEVICE_ASSIGNMENT) += assigned-dev.o iommu.o
kvm-intel-y += vmx.o
kvm-intel-y += vmx.o pmu_intel.o
kvm-amd-y += svm.o
kvm-amd-y += svm.o pmu_amd.o
obj-$(CONFIG_KVM) += kvm.o
obj-$(CONFIG_KVM_INTEL) += kvm-intel.o
......
/*
* Kernel-based Virtual Machine -- Performance Monitoring Unit support
*
* Copyright 2011 Red Hat, Inc. and/or its affiliates.
* Copyright 2015 Red Hat, Inc. and/or its affiliates.
*
* Authors:
* Avi Kivity <avi@redhat.com>
* Gleb Natapov <gleb@redhat.com>
* Wei Huang <wei@redhat.com>
*
* This work is licensed under the terms of the GNU GPL, version 2. See
* the COPYING file in the top-level directory.
@@ -21,67 +22,30 @@
#include "lapic.h"
#include "pmu.h"
static struct kvm_event_hw_type_mapping arch_events[] = {
/* Index must match CPUID 0x0A.EBX bit vector */
[0] = { 0x3c, 0x00, PERF_COUNT_HW_CPU_CYCLES },
[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
[2] = { 0x3c, 0x01, PERF_COUNT_HW_BUS_CYCLES },
[3] = { 0x2e, 0x4f, PERF_COUNT_HW_CACHE_REFERENCES },
[4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
[5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
[6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
[7] = { 0x00, 0x30, PERF_COUNT_HW_REF_CPU_CYCLES },
};

/* mapping between fixed pmc index and arch_events array */
static int fixed_pmc_events[] = {1, 0, 7};

static bool pmc_is_gp(struct kvm_pmc *pmc)
{
return pmc->type == KVM_PMC_GP;
}

static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
{
struct kvm_pmu *pmu = pmc_to_pmu(pmc);
return pmu->counter_bitmask[pmc->type];
}

static inline bool pmc_is_enabled(struct kvm_pmc *pmc)
{
struct kvm_pmu *pmu = pmc_to_pmu(pmc);
return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
}

static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
u32 base)
{
if (msr >= base && msr < base + pmu->nr_arch_gp_counters)
return &pmu->gp_counters[msr - base];
return NULL;
}

static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
{
int base = MSR_CORE_PERF_FIXED_CTR0;
if (msr >= base && msr < base + pmu->nr_arch_fixed_counters)
return &pmu->fixed_counters[msr - base];
return NULL;
}

static inline struct kvm_pmc *get_fixed_pmc_idx(struct kvm_pmu *pmu, int idx)
{
return get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + idx);
}

static struct kvm_pmc *global_idx_to_pmc(struct kvm_pmu *pmu, int idx)
{
if (idx < INTEL_PMC_IDX_FIXED)
return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + idx, MSR_P6_EVNTSEL0);
else
return get_fixed_pmc_idx(pmu, idx - INTEL_PMC_IDX_FIXED);
}

/* NOTE:
* - Each perf counter is defined as "struct kvm_pmc";
* - There are two types of perf counters: general purpose (gp) and fixed.
* gp counters are stored in gp_counters[] and fixed counters are stored
* in fixed_counters[] respectively. Both of them are part of "struct
* kvm_pmu";
* - pmu.c understands the difference between gp counters and fixed counters.
* However AMD doesn't support fixed-counters;
* - There are three types of index to access perf counters (PMC):
* 1. MSR (named msr): For example Intel has MSR_IA32_PERFCTRn and AMD
* has MSR_K7_PERFCTRn.
* 2. MSR Index (named idx): This normally is used by RDPMC instruction.
* For instance AMD RDPMC instruction uses 0000_0003h in ECX to access
* C001_0007h (MSR_K7_PERFCTR3). Intel has a similar mechanism, except
* that it also supports fixed counters. idx can be used as an index to
* gp and fixed counters.
* 3. Global PMC Index (named pmc): pmc is an index specific to PMU
* code. Each pmc, stored in kvm_pmc.idx field, is unique across
* all perf counters (both gp and fixed). The mapping relationship
* between pmc and perf counters is as the following:
* * Intel: [0 .. INTEL_PMC_MAX_GENERIC-1] <=> gp counters
* [INTEL_PMC_IDX_FIXED .. INTEL_PMC_IDX_FIXED + 2] <=> fixed
* * AMD: [0 .. AMD64_NUM_COUNTERS-1] <=> gp counters
*/
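As an aside (not part of the patch), the RDPMC "idx" encoding described in the NOTE above can be decoded as in the following small standalone sketch; it mirrors the bit-30 fixed-counter check that intel_is_valid_msr_idx() and intel_msr_idx_to_pmc() perform further down in this patch. The helper name and the userspace form are illustrative only.

/* Illustrative decoding of an RDPMC ECX value; standalone, not kernel code. */
#include <stdbool.h>
#include <stdio.h>

static void decode_rdpmc_idx(unsigned int idx)
{
	bool fixed = idx & (1u << 30);            /* bit 30 selects the fixed-counter space */
	unsigned int counter = idx & ~(3u << 30); /* remaining bits give the counter number */

	printf("%s counter %u\n", fixed ? "fixed" : "gp", counter);
}

int main(void)
{
	decode_rdpmc_idx(0);               /* gp counter 0 */
	decode_rdpmc_idx((1u << 30) | 1);  /* fixed counter 1 */
	return 0;
}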
static void kvm_pmi_trigger_fn(struct irq_work *irq_work)
{
@@ -132,30 +96,6 @@ static void kvm_perf_overflow_intr(struct perf_event *perf_event,
}
}
static u64 pmc_read_counter(struct kvm_pmc *pmc)
{
u64 counter, enabled, running;
counter = pmc->counter;
if (pmc->perf_event)
counter += perf_event_read_value(pmc->perf_event,
&enabled, &running);
/* FIXME: Scaling needed? */
return counter & pmc_bitmask(pmc);
}
static void pmc_stop_counter(struct kvm_pmc *pmc)
{
if (pmc->perf_event) {
pmc->counter = pmc_read_counter(pmc);
perf_event_release_kernel(pmc->perf_event);
pmc->perf_event = NULL;
}
}
static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
unsigned config, bool exclude_user,
bool exclude_kernel, bool intr,
@@ -193,24 +133,7 @@ static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
clear_bit(pmc->idx, (unsigned long*)&pmc_to_pmu(pmc)->reprogram_pmi);
}
static unsigned find_arch_event(struct kvm_pmu *pmu, u8 event_select,
u8 unit_mask)
{
int i;
for (i = 0; i < ARRAY_SIZE(arch_events); i++)
if (arch_events[i].eventsel == event_select
&& arch_events[i].unit_mask == unit_mask
&& (pmu->available_event_types & (1 << i)))
break;
if (i == ARRAY_SIZE(arch_events))
return PERF_COUNT_HW_MAX;
return arch_events[i].event_type;
}
static void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
{
unsigned config, type = PERF_TYPE_RAW;
u8 event_select, unit_mask;
@@ -233,8 +156,9 @@ static void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
ARCH_PERFMON_EVENTSEL_CMASK |
HSW_IN_TX |
HSW_IN_TX_CHECKPOINTED))) {
config = find_arch_event(pmc_to_pmu(pmc), event_select,
unit_mask);
config = kvm_x86_ops->pmu_ops->find_arch_event(pmc_to_pmu(pmc),
event_select,
unit_mask);
if (config != PERF_COUNT_HW_MAX)
type = PERF_TYPE_HARDWARE;
}
@@ -249,8 +173,9 @@ static void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
(eventsel & HSW_IN_TX),
(eventsel & HSW_IN_TX_CHECKPOINTED));
}
EXPORT_SYMBOL_GPL(reprogram_gp_counter);
static void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int idx)
void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int idx)
{
unsigned en_field = ctrl & 0x3;
bool pmi = ctrl & 0x8;
@@ -261,38 +186,16 @@ static void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int idx)
return;

pmc_reprogram_counter(pmc, PERF_TYPE_HARDWARE,
arch_events[fixed_pmc_events[idx]].event_type,
kvm_x86_ops->pmu_ops->find_fixed_event(idx),
!(en_field & 0x2), /* exclude user */
!(en_field & 0x1), /* exclude kernel */
pmi, false, false);
}
EXPORT_SYMBOL_GPL(reprogram_fixed_counter);
static inline u8 fixed_ctrl_field(u64 ctrl, int idx)
{
return (ctrl >> (idx * 4)) & 0xf;
}

static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
{
int i;
for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
u8 old_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, i);
u8 new_ctrl = fixed_ctrl_field(data, i);
struct kvm_pmc *pmc = get_fixed_pmc_idx(pmu, i);
if (old_ctrl == new_ctrl)
continue;
reprogram_fixed_counter(pmc, new_ctrl, i);
}
pmu->fixed_ctr_ctrl = data;
}

static void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx)
{
struct kvm_pmc *pmc = global_idx_to_pmc(pmu, pmc_idx);

void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx)
{
struct kvm_pmc *pmc = kvm_x86_ops->pmu_ops->pmc_idx_to_pmc(pmu, pmc_idx);

if (!pmc)
return;
@@ -306,17 +209,7 @@ static void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx)
reprogram_fixed_counter(pmc, ctrl, idx);
}
}
EXPORT_SYMBOL_GPL(reprogram_counter);
static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
{
int bit;
u64 diff = pmu->global_ctrl ^ data;
pmu->global_ctrl = data;
for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
reprogram_counter(pmu, bit);
}
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
{
@@ -327,7 +220,7 @@ void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
bitmask = pmu->reprogram_pmi;

for_each_set_bit(bit, (unsigned long *)&bitmask, X86_PMC_IDX_MAX) {
struct kvm_pmc *pmc = global_idx_to_pmc(pmu, bit);
struct kvm_pmc *pmc = kvm_x86_ops->pmu_ops->pmc_idx_to_pmc(pmu, bit);

if (unlikely(!pmc || !pmc->perf_event)) {
clear_bit(bit, (unsigned long *)&pmu->reprogram_pmi);
@@ -341,28 +234,7 @@ void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
/* check if idx is a valid index to access PMU */
int kvm_pmu_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
{
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
return kvm_x86_ops->pmu_ops->is_valid_msr_idx(vcpu, idx);
bool fixed = idx & (1u << 30);
idx &= ~(3u << 30);
return (!fixed && idx >= pmu->nr_arch_gp_counters) ||
(fixed && idx >= pmu->nr_arch_fixed_counters);
}
static struct kvm_pmc *kvm_pmu_msr_idx_to_pmc(struct kvm_vcpu *vcpu,
unsigned idx)
{
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
bool fixed = idx & (1u << 30);
struct kvm_pmc *counters;
idx &= ~(3u << 30);
if (!fixed && idx >= pmu->nr_arch_gp_counters)
return NULL;
if (fixed && idx >= pmu->nr_arch_fixed_counters)
return NULL;
counters = fixed ? pmu->fixed_counters : pmu->gp_counters;
return &counters[idx];
}
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
@@ -371,7 +243,7 @@ int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
struct kvm_pmc *pmc;
u64 ctr_val;

pmc = kvm_pmu_msr_idx_to_pmc(vcpu, idx);
pmc = kvm_x86_ops->pmu_ops->msr_idx_to_pmc(vcpu, idx);
if (!pmc)
return 1;
@@ -391,111 +263,17 @@ void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
return kvm_x86_ops->pmu_ops->is_valid_msr(vcpu, msr);
int ret;
switch (msr) {
case MSR_CORE_PERF_FIXED_CTR_CTRL:
case MSR_CORE_PERF_GLOBAL_STATUS:
case MSR_CORE_PERF_GLOBAL_CTRL:
case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
ret = pmu->version > 1;
break;
default:
ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)
|| get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0)
|| get_fixed_pmc(pmu, msr);
break;
}
return ret;
}

int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
{
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
return kvm_x86_ops->pmu_ops->get_msr(vcpu, msr, data);
struct kvm_pmc *pmc;
switch (index) {
case MSR_CORE_PERF_FIXED_CTR_CTRL:
*data = pmu->fixed_ctr_ctrl;
return 0;
case MSR_CORE_PERF_GLOBAL_STATUS:
*data = pmu->global_status;
return 0;
case MSR_CORE_PERF_GLOBAL_CTRL:
*data = pmu->global_ctrl;
return 0;
case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
*data = pmu->global_ovf_ctrl;
return 0;
default:
if ((pmc = get_gp_pmc(pmu, index, MSR_IA32_PERFCTR0)) ||
(pmc = get_fixed_pmc(pmu, index))) {
*data = pmc_read_counter(pmc);
return 0;
} else if ((pmc = get_gp_pmc(pmu, index, MSR_P6_EVNTSEL0))) {
*data = pmc->eventsel;
return 0;
}
}
return 1;
}

int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
return kvm_x86_ops->pmu_ops->set_msr(vcpu, msr_info);
struct kvm_pmc *pmc;
u32 index = msr_info->index;
u64 data = msr_info->data;
switch (index) {
case MSR_CORE_PERF_FIXED_CTR_CTRL:
if (pmu->fixed_ctr_ctrl == data)
return 0;
if (!(data & 0xfffffffffffff444ull)) {
reprogram_fixed_counters(pmu, data);
return 0;
}
break;
case MSR_CORE_PERF_GLOBAL_STATUS:
if (msr_info->host_initiated) {
pmu->global_status = data;
return 0;
}
break; /* RO MSR */
case MSR_CORE_PERF_GLOBAL_CTRL:
if (pmu->global_ctrl == data)
return 0;
if (!(data & pmu->global_ctrl_mask)) {
global_ctrl_changed(pmu, data);
return 0;
}
break;
case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
if (!(data & (pmu->global_ctrl_mask & ~(3ull<<62)))) {
if (!msr_info->host_initiated)
pmu->global_status &= ~data;
pmu->global_ovf_ctrl = data;
return 0;
}
break;
default:
if ((pmc = get_gp_pmc(pmu, index, MSR_IA32_PERFCTR0)) ||
(pmc = get_fixed_pmc(pmu, index))) {
if (!msr_info->host_initiated)
data = (s64)(s32)data;
pmc->counter += data - pmc_read_counter(pmc);
return 0;
} else if ((pmc = get_gp_pmc(pmu, index, MSR_P6_EVNTSEL0))) {
if (data == pmc->eventsel)
return 0;
if (!(data & pmu->reserved_bits)) {
reprogram_gp_counter(pmc, data);
return 0;
}
}
}
return 1;
}

/* refresh PMU settings. This function generally is called when underlying
@@ -504,90 +282,23 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
*/
void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
{
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
kvm_x86_ops->pmu_ops->refresh(vcpu);
struct kvm_cpuid_entry2 *entry;
union cpuid10_eax eax;
union cpuid10_edx edx;
pmu->nr_arch_gp_counters = 0;
pmu->nr_arch_fixed_counters = 0;
pmu->counter_bitmask[KVM_PMC_GP] = 0;
pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
pmu->version = 0;
pmu->reserved_bits = 0xffffffff00200000ull;
entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
if (!entry)
return;
eax.full = entry->eax;
edx.full = entry->edx;
pmu->version = eax.split.version_id;
if (!pmu->version)
return;
pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
INTEL_PMC_MAX_GENERIC);
pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
pmu->available_event_types = ~entry->ebx &
((1ull << eax.split.mask_length) - 1);
if (pmu->version == 1) {
pmu->nr_arch_fixed_counters = 0;
} else {
pmu->nr_arch_fixed_counters =
min_t(int, edx.split.num_counters_fixed,
INTEL_PMC_MAX_FIXED);
pmu->counter_bitmask[KVM_PMC_FIXED] =
((u64)1 << edx.split.bit_width_fixed) - 1;
}
pmu->global_ctrl = ((1 << pmu->nr_arch_gp_counters) - 1) |
(((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
pmu->global_ctrl_mask = ~pmu->global_ctrl;
entry = kvm_find_cpuid_entry(vcpu, 7, 0);
if (entry &&
(boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
(entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM)))
pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;
}

void kvm_pmu_reset(struct kvm_vcpu *vcpu)
{
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
int i;

irq_work_sync(&pmu->irq_work);

for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
kvm_x86_ops->pmu_ops->reset(vcpu);
struct kvm_pmc *pmc = &pmu->gp_counters[i];
pmc_stop_counter(pmc);
pmc->counter = pmc->eventsel = 0;
}
for (i = 0; i < INTEL_PMC_MAX_FIXED; i++)
pmc_stop_counter(&pmu->fixed_counters[i]);
pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status =
pmu->global_ovf_ctrl = 0;
}

void kvm_pmu_init(struct kvm_vcpu *vcpu)
{
int i;
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

memset(pmu, 0, sizeof(*pmu));
for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
kvm_x86_ops->pmu_ops->init(vcpu);
pmu->gp_counters[i].type = KVM_PMC_GP;
pmu->gp_counters[i].vcpu = vcpu;
pmu->gp_counters[i].idx = i;
}
for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
pmu->fixed_counters[i].type = KVM_PMC_FIXED;
pmu->fixed_counters[i].vcpu = vcpu;
pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
}
init_irq_work(&pmu->irq_work, kvm_pmi_trigger_fn);
kvm_pmu_refresh(vcpu);
}
......
@@ -5,12 +5,102 @@
#define pmu_to_vcpu(pmu) (container_of((pmu), struct kvm_vcpu, arch.pmu))
#define pmc_to_pmu(pmc) (&(pmc)->vcpu->arch.pmu)
/* retrieve the 4 bits for EN and PMI out of IA32_FIXED_CTR_CTRL */
#define fixed_ctrl_field(ctrl_reg, idx) (((ctrl_reg) >> ((idx)*4)) & 0xf)
struct kvm_event_hw_type_mapping {
u8 eventsel;
u8 unit_mask;
unsigned event_type;
};
struct kvm_pmu_ops {
unsigned (*find_arch_event)(struct kvm_pmu *pmu, u8 event_select,
u8 unit_mask);
unsigned (*find_fixed_event)(int idx);
bool (*pmc_is_enabled)(struct kvm_pmc *pmc);
struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, unsigned idx);
int (*is_valid_msr_idx)(struct kvm_vcpu *vcpu, unsigned idx);
bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
void (*refresh)(struct kvm_vcpu *vcpu);
void (*init)(struct kvm_vcpu *vcpu);
void (*reset)(struct kvm_vcpu *vcpu);
};
static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
{
struct kvm_pmu *pmu = pmc_to_pmu(pmc);
return pmu->counter_bitmask[pmc->type];
}
static inline u64 pmc_read_counter(struct kvm_pmc *pmc)
{
u64 counter, enabled, running;
counter = pmc->counter;
if (pmc->perf_event)
counter += perf_event_read_value(pmc->perf_event,
&enabled, &running);
/* FIXME: Scaling needed? */
return counter & pmc_bitmask(pmc);
}
static inline void pmc_stop_counter(struct kvm_pmc *pmc)
{
if (pmc->perf_event) {
pmc->counter = pmc_read_counter(pmc);
perf_event_release_kernel(pmc->perf_event);
pmc->perf_event = NULL;
}
}
static inline bool pmc_is_gp(struct kvm_pmc *pmc)
{
return pmc->type == KVM_PMC_GP;
}
static inline bool pmc_is_fixed(struct kvm_pmc *pmc)
{
return pmc->type == KVM_PMC_FIXED;
}
static inline bool pmc_is_enabled(struct kvm_pmc *pmc)
{
return kvm_x86_ops->pmu_ops->pmc_is_enabled(pmc);
}
/* returns general purpose PMC with the specified MSR. Note that it can be
* used for both PERFCTRn and EVNTSELn; that is why it accepts base as a
* parameter to tell them apart.
*/
static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
u32 base)
{
if (msr >= base && msr < base + pmu->nr_arch_gp_counters)
return &pmu->gp_counters[msr - base];
return NULL;
}
/* returns fixed PMC with the specified MSR */
static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
{
int base = MSR_CORE_PERF_FIXED_CTR0;
if (msr >= base && msr < base + pmu->nr_arch_fixed_counters)
return &pmu->fixed_counters[msr - base];
return NULL;
}
void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel);
void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int fixed_idx);
void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx);
void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu);
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu);
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
@@ -23,4 +113,6 @@ void kvm_pmu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
extern struct kvm_pmu_ops intel_pmu_ops;
extern struct kvm_pmu_ops amd_pmu_ops;
#endif /* __KVM_X86_PMU_H */
/*
* KVM PMU support for AMD
*
* Copyright 2015, Red Hat, Inc. and/or its affiliates.
*
* Author:
* Wei Huang <wei@redhat.com>
*
* This work is licensed under the terms of the GNU GPL, version 2. See
* the COPYING file in the top-level directory.
*
* Implementation is based on pmu_intel.c file
*/
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"
static unsigned amd_find_arch_event(struct kvm_pmu *pmu,
u8 event_select,
u8 unit_mask)
{
return PERF_COUNT_HW_MAX;
}
/* return PERF_COUNT_HW_MAX as AMD doesn't have fixed events */
static unsigned amd_find_fixed_event(int idx)
{
return PERF_COUNT_HW_MAX;
}
static bool amd_pmc_is_enabled(struct kvm_pmc *pmc)
{
return false;
}
static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
return NULL;
}
/* returns 0 if idx's corresponding MSR exists; otherwise returns 1. */
static int amd_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
{
return 1;
}
/* idx is the ECX register of RDPMC instruction */
static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, unsigned idx)
{
return NULL;
}
static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
return false;
}
static int amd_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
{
return 1;
}
static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
return 1;
}
static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
{
}
static void amd_pmu_init(struct kvm_vcpu *vcpu)
{
}
static void amd_pmu_reset(struct kvm_vcpu *vcpu)
{
}
struct kvm_pmu_ops amd_pmu_ops = {
.find_arch_event = amd_find_arch_event,
.find_fixed_event = amd_find_fixed_event,
.pmc_is_enabled = amd_pmc_is_enabled,
.pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
.msr_idx_to_pmc = amd_msr_idx_to_pmc,
.is_valid_msr_idx = amd_is_valid_msr_idx,
.is_valid_msr = amd_is_valid_msr,
.get_msr = amd_pmu_get_msr,
.set_msr = amd_pmu_set_msr,
.refresh = amd_pmu_refresh,
.init = amd_pmu_init,
.reset = amd_pmu_reset,
};
/*
* KVM PMU support for Intel CPUs
*
* Copyright 2011 Red Hat, Inc. and/or its affiliates.
*
* Authors:
* Avi Kivity <avi@redhat.com>
* Gleb Natapov <gleb@redhat.com>
*
* This work is licensed under the terms of the GNU GPL, version 2. See
* the COPYING file in the top-level directory.
*
*/
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <asm/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"
static struct kvm_event_hw_type_mapping intel_arch_events[] = {
/* Index must match CPUID 0x0A.EBX bit vector */
[0] = { 0x3c, 0x00, PERF_COUNT_HW_CPU_CYCLES },
[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
[2] = { 0x3c, 0x01, PERF_COUNT_HW_BUS_CYCLES },
[3] = { 0x2e, 0x4f, PERF_COUNT_HW_CACHE_REFERENCES },
[4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
[5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
[6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
[7] = { 0x00, 0x30, PERF_COUNT_HW_REF_CPU_CYCLES },
};
/* mapping between fixed pmc index and intel_arch_events array */
static int fixed_pmc_events[] = {1, 0, 7};
static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
{
int i;
for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
u8 new_ctrl = fixed_ctrl_field(data, i);
u8 old_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, i);
struct kvm_pmc *pmc;
pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i);
if (old_ctrl == new_ctrl)
continue;
reprogram_fixed_counter(pmc, new_ctrl, i);
}
pmu->fixed_ctr_ctrl = data;
}
/* Called when the global control register has been updated. */
static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
{
int bit;
u64 diff = pmu->global_ctrl ^ data;
pmu->global_ctrl = data;
for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
reprogram_counter(pmu, bit);
}
static unsigned intel_find_arch_event(struct kvm_pmu *pmu,
u8 event_select,
u8 unit_mask)
{
int i;
for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++)
if (intel_arch_events[i].eventsel == event_select
&& intel_arch_events[i].unit_mask == unit_mask
&& (pmu->available_event_types & (1 << i)))
break;
if (i == ARRAY_SIZE(intel_arch_events))
return PERF_COUNT_HW_MAX;
return intel_arch_events[i].event_type;
}
static unsigned intel_find_fixed_event(int idx)
{
if (idx >= ARRAY_SIZE(fixed_pmc_events))
return PERF_COUNT_HW_MAX;
return intel_arch_events[fixed_pmc_events[idx]].event_type;
}
/* check if a PMC is enabled by comparing it with global_ctrl bits. */
static bool intel_pmc_is_enabled(struct kvm_pmc *pmc)
{
struct kvm_pmu *pmu = pmc_to_pmu(pmc);
return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
}
static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
if (pmc_idx < INTEL_PMC_IDX_FIXED)
return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + pmc_idx,
MSR_P6_EVNTSEL0);
else {
u32 idx = pmc_idx - INTEL_PMC_IDX_FIXED;
return get_fixed_pmc(pmu, idx + MSR_CORE_PERF_FIXED_CTR0);
}
}
/* returns 0 if idx's corresponding MSR exists; otherwise returns 1. */
static int intel_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
{
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
bool fixed = idx & (1u << 30);
idx &= ~(3u << 30);
return (!fixed && idx >= pmu->nr_arch_gp_counters) ||
(fixed && idx >= pmu->nr_arch_fixed_counters);
}
static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu,
unsigned idx)
{
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
bool fixed = idx & (1u << 30);
struct kvm_pmc *counters;
idx &= ~(3u << 30);
if (!fixed && idx >= pmu->nr_arch_gp_counters)
return NULL;
if (fixed && idx >= pmu->nr_arch_fixed_counters)
return NULL;
counters = fixed ? pmu->fixed_counters : pmu->gp_counters;
return &counters[idx];
}
static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
int ret;
switch (msr) {
case MSR_CORE_PERF_FIXED_CTR_CTRL:
case MSR_CORE_PERF_GLOBAL_STATUS:
case MSR_CORE_PERF_GLOBAL_CTRL:
case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
ret = pmu->version > 1;
break;
default:
ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
get_fixed_pmc(pmu, msr);
break;
}
return ret;
}
static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
{
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
struct kvm_pmc *pmc;
switch (msr) {
case MSR_CORE_PERF_FIXED_CTR_CTRL:
*data = pmu->fixed_ctr_ctrl;
return 0;
case MSR_CORE_PERF_GLOBAL_STATUS:
*data = pmu->global_status;
return 0;
case MSR_CORE_PERF_GLOBAL_CTRL:
*data = pmu->global_ctrl;
return 0;
case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
*data = pmu->global_ovf_ctrl;
return 0;
default:
if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
(pmc = get_fixed_pmc(pmu, msr))) {
*data = pmc_read_counter(pmc);
return 0;
} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
*data = pmc->eventsel;
return 0;
}
}
return 1;
}
static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
struct kvm_pmc *pmc;
u32 msr = msr_info->index;
u64 data = msr_info->data;
switch (msr) {
case MSR_CORE_PERF_FIXED_CTR_CTRL:
if (pmu->fixed_ctr_ctrl == data)
return 0;
if (!(data & 0xfffffffffffff444ull)) {
reprogram_fixed_counters(pmu, data);
return 0;
}
break;
case MSR_CORE_PERF_GLOBAL_STATUS:
if (msr_info->host_initiated) {
pmu->global_status = data;
return 0;
}
break; /* RO MSR */
case MSR_CORE_PERF_GLOBAL_CTRL:
if (pmu->global_ctrl == data)
return 0;
if (!(data & pmu->global_ctrl_mask)) {
global_ctrl_changed(pmu, data);
return 0;
}
break;
case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
if (!(data & (pmu->global_ctrl_mask & ~(3ull<<62)))) {
if (!msr_info->host_initiated)
pmu->global_status &= ~data;
pmu->global_ovf_ctrl = data;
return 0;
}
break;
default:
if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
(pmc = get_fixed_pmc(pmu, msr))) {
if (!msr_info->host_initiated)
data = (s64)(s32)data;
pmc->counter += data - pmc_read_counter(pmc);
return 0;
} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
if (data == pmc->eventsel)
return 0;
if (!(data & pmu->reserved_bits)) {
reprogram_gp_counter(pmc, data);
return 0;
}
}
}
return 1;
}
static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
{
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
struct kvm_cpuid_entry2 *entry;
union cpuid10_eax eax;
union cpuid10_edx edx;
pmu->nr_arch_gp_counters = 0;
pmu->nr_arch_fixed_counters = 0;
pmu->counter_bitmask[KVM_PMC_GP] = 0;
pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
pmu->version = 0;
pmu->reserved_bits = 0xffffffff00200000ull;
entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
if (!entry)
return;
eax.full = entry->eax;
edx.full = entry->edx;
pmu->version = eax.split.version_id;
if (!pmu->version)
return;
pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
INTEL_PMC_MAX_GENERIC);
pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
pmu->available_event_types = ~entry->ebx &
((1ull << eax.split.mask_length) - 1);
if (pmu->version == 1) {
pmu->nr_arch_fixed_counters = 0;
} else {
pmu->nr_arch_fixed_counters =
min_t(int, edx.split.num_counters_fixed,
INTEL_PMC_MAX_FIXED);
pmu->counter_bitmask[KVM_PMC_FIXED] =
((u64)1 << edx.split.bit_width_fixed) - 1;
}
pmu->global_ctrl = ((1 << pmu->nr_arch_gp_counters) - 1) |
(((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
pmu->global_ctrl_mask = ~pmu->global_ctrl;
entry = kvm_find_cpuid_entry(vcpu, 7, 0);
if (entry &&
(boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
(entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM)))
pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;
}
static void intel_pmu_init(struct kvm_vcpu *vcpu)
{
int i;
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
pmu->gp_counters[i].type = KVM_PMC_GP;
pmu->gp_counters[i].vcpu = vcpu;
pmu->gp_counters[i].idx = i;
}
for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
pmu->fixed_counters[i].type = KVM_PMC_FIXED;
pmu->fixed_counters[i].vcpu = vcpu;
pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
}
}
static void intel_pmu_reset(struct kvm_vcpu *vcpu)
{
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
int i;
for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
struct kvm_pmc *pmc = &pmu->gp_counters[i];
pmc_stop_counter(pmc);
pmc->counter = pmc->eventsel = 0;
}
for (i = 0; i < INTEL_PMC_MAX_FIXED; i++)
pmc_stop_counter(&pmu->fixed_counters[i]);
pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status =
pmu->global_ovf_ctrl = 0;
}
struct kvm_pmu_ops intel_pmu_ops = {
.find_arch_event = intel_find_arch_event,
.find_fixed_event = intel_find_fixed_event,
.pmc_is_enabled = intel_pmc_is_enabled,
.pmc_idx_to_pmc = intel_pmc_idx_to_pmc,
.msr_idx_to_pmc = intel_msr_idx_to_pmc,
.is_valid_msr_idx = intel_is_valid_msr_idx,
.is_valid_msr = intel_is_valid_msr,
.get_msr = intel_pmu_get_msr,
.set_msr = intel_pmu_set_msr,
.refresh = intel_pmu_refresh,
.init = intel_pmu_init,
.reset = intel_pmu_reset,
};
@@ -21,6 +21,7 @@
#include "kvm_cache_regs.h"
#include "x86.h"
#include "cpuid.h"
#include "pmu.h"

#include <linux/module.h>
#include <linux/mod_devicetable.h>
@@ -4457,6 +4458,8 @@ static struct kvm_x86_ops svm_x86_ops = {
.handle_external_intr = svm_handle_external_intr,
.sched_in = svm_sched_in,

.pmu_ops = &amd_pmu_ops,
};

static int __init svm_init(void)
......
@@ -48,6 +48,7 @@
#include <asm/apic.h>

#include "trace.h"
#include "pmu.h"

#define __ex(x) __kvm_handle_fault_on_reboot(x)
#define __ex_clear(x, reg) \
@@ -10419,6 +10420,8 @@ static struct kvm_x86_ops vmx_x86_ops = {
.slot_disable_log_dirty = vmx_slot_disable_log_dirty,
.flush_log_dirty = vmx_flush_log_dirty,
.enable_log_dirty_pt_masked = vmx_enable_log_dirty_pt_masked,

.pmu_ops = &intel_pmu_ops,
};

static int __init vmx_init(void)
......