Commit e5af058a authored by Wei Huang, committed by Paolo Bonzini

KVM: x86/vPMU: reorder PMU functions

Keep called functions closer to their callers, and init/destroy
functions next to each other.
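
As context for the functions being moved: kvm_pmu_rdpmc() decodes the guest's RDPMC index word, where bit 31 requests fast mode (return only the low 32 bits of the counter) and bit 30 selects the fixed-counter bank. A minimal standalone sketch of that decoding, with illustrative names not taken from the patch:

	#include <stdbool.h>

	/* Illustrative only: mirrors the bit layout kvm_pmu_rdpmc() decodes. */
	struct rdpmc_idx {
		bool fast_mode;   /* bit 31: truncate the result to 32 bits */
		bool fixed;       /* bit 30: fixed-counter bank rather than GP */
		unsigned counter; /* low bits: counter number within the bank */
	};

	static struct rdpmc_idx decode_rdpmc_idx(unsigned idx)
	{
		struct rdpmc_idx d;

		d.fast_mode = idx & (1u << 31);
		d.fixed = idx & (1u << 30);
		d.counter = idx & ~(3u << 30);
		return d;
	}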
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent e84cfe4c
@@ -83,12 +83,6 @@ static struct kvm_pmc *global_idx_to_pmc(struct kvm_pmu *pmu, int idx)
 	return get_fixed_pmc_idx(pmu, idx - INTEL_PMC_IDX_FIXED);
 }
 
-void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
-{
-	if (vcpu->arch.apic)
-		kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
-}
-
 static void kvm_pmi_trigger_fn(struct irq_work *irq_work)
 {
 	struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu, irq_work);
@@ -324,6 +318,65 @@ static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
 		reprogram_counter(pmu, bit);
 }
 
+void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
+{
+	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+	u64 bitmask;
+	int bit;
+
+	bitmask = pmu->reprogram_pmi;
+
+	for_each_set_bit(bit, (unsigned long *)&bitmask, X86_PMC_IDX_MAX) {
+		struct kvm_pmc *pmc = global_idx_to_pmc(pmu, bit);
+
+		if (unlikely(!pmc || !pmc->perf_event)) {
+			clear_bit(bit, (unsigned long *)&pmu->reprogram_pmi);
+			continue;
+		}
+
+		reprogram_counter(pmu, bit);
+	}
+}
+
+/* check if idx is a valid index to access PMU */
+int kvm_pmu_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
+{
+	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+	bool fixed = idx & (1u << 30);
+
+	idx &= ~(3u << 30);
+
+	return (!fixed && idx >= pmu->nr_arch_gp_counters) ||
+		(fixed && idx >= pmu->nr_arch_fixed_counters);
+}
+
+int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
+{
+	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+	bool fast_mode = idx & (1u << 31);
+	bool fixed = idx & (1u << 30);
+	struct kvm_pmc *counters;
+	u64 ctr_val;
+
+	idx &= ~(3u << 30);
+	if (!fixed && idx >= pmu->nr_arch_gp_counters)
+		return 1;
+	if (fixed && idx >= pmu->nr_arch_fixed_counters)
+		return 1;
+
+	counters = fixed ? pmu->fixed_counters : pmu->gp_counters;
+	ctr_val = pmc_read_counter(&counters[idx]);
+	if (fast_mode)
+		ctr_val = (u32)ctr_val;
+
+	*data = ctr_val;
+	return 0;
+}
+
+void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
+{
+	if (vcpu->arch.apic)
+		kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
+}
+
 bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
@@ -433,39 +486,6 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	return 1;
 }
 
-/* check if idx is a valid index to access PMU */
-int kvm_pmu_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
-{
-	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
-	bool fixed = idx & (1u << 30);
-
-	idx &= ~(3u << 30);
-
-	return (!fixed && idx >= pmu->nr_arch_gp_counters) ||
-		(fixed && idx >= pmu->nr_arch_fixed_counters);
-}
-
-int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
-{
-	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
-	bool fast_mode = idx & (1u << 31);
-	bool fixed = idx & (1u << 30);
-	struct kvm_pmc *counters;
-	u64 ctr_val;
-
-	idx &= ~(3u << 30);
-	if (!fixed && idx >= pmu->nr_arch_gp_counters)
-		return 1;
-	if (fixed && idx >= pmu->nr_arch_fixed_counters)
-		return 1;
-
-	counters = fixed ? pmu->fixed_counters : pmu->gp_counters;
-	ctr_val = pmc_read_counter(&counters[idx]);
-	if (fast_mode)
-		ctr_val = (u32)ctr_val;
-
-	*data = ctr_val;
-	return 0;
-}
-
 /* refresh PMU settings. This function generally is called when underlying
  * settings are changed (such as changes of PMU CPUID by guest VMs), which
  * should rarely happen.
@@ -521,26 +541,6 @@ void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
 	pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;
 }
 
-void kvm_pmu_init(struct kvm_vcpu *vcpu)
-{
-	int i;
-	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
-
-	memset(pmu, 0, sizeof(*pmu));
-	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
-		pmu->gp_counters[i].type = KVM_PMC_GP;
-		pmu->gp_counters[i].vcpu = vcpu;
-		pmu->gp_counters[i].idx = i;
-	}
-	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
-		pmu->fixed_counters[i].type = KVM_PMC_FIXED;
-		pmu->fixed_counters[i].vcpu = vcpu;
-		pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
-	}
-	init_irq_work(&pmu->irq_work, kvm_pmi_trigger_fn);
-	kvm_pmu_refresh(vcpu);
-}
-
 void kvm_pmu_reset(struct kvm_vcpu *vcpu)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
@@ -560,27 +560,27 @@ void kvm_pmu_reset(struct kvm_vcpu *vcpu)
 	pmu->global_ovf_ctrl = 0;
 }
 
-void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
-{
-	kvm_pmu_reset(vcpu);
-}
-
-void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
-{
-	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
-	u64 bitmask;
-	int bit;
-
-	bitmask = pmu->reprogram_pmi;
-
-	for_each_set_bit(bit, (unsigned long *)&bitmask, X86_PMC_IDX_MAX) {
-		struct kvm_pmc *pmc = global_idx_to_pmc(pmu, bit);
-
-		if (unlikely(!pmc || !pmc->perf_event)) {
-			clear_bit(bit, (unsigned long *)&pmu->reprogram_pmi);
-			continue;
-		}
-
-		reprogram_counter(pmu, bit);
-	}
-}
+void kvm_pmu_init(struct kvm_vcpu *vcpu)
+{
+	int i;
+	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+
+	memset(pmu, 0, sizeof(*pmu));
+	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
+		pmu->gp_counters[i].type = KVM_PMC_GP;
+		pmu->gp_counters[i].vcpu = vcpu;
+		pmu->gp_counters[i].idx = i;
+	}
+	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
+		pmu->fixed_counters[i].type = KVM_PMC_FIXED;
+		pmu->fixed_counters[i].vcpu = vcpu;
+		pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
+	}
+	init_irq_work(&pmu->irq_work, kvm_pmi_trigger_fn);
+	kvm_pmu_refresh(vcpu);
+}
+
+void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
+{
+	kvm_pmu_reset(vcpu);
+}