Commit fb121aaf authored by Like Xu, committed by Paolo Bonzini

KVM: x86/pmu: Drop "u64 eventsel" for reprogram_gp_counter()

Because reprogram_gp_counter() always assigns the requested eventsel to
pmc->eventsel, this assignment can be moved forward into the callers,
simplifying the parameter list to "struct kvm_pmc *pmc" only.

No functional change intended.
Signed-off-by: Like Xu <likexu@tencent.com>
Message-Id: <20220518132512.37864-6-likexu@tencent.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent a40239b4
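The net effect on callers, as a minimal standalone sketch (illustration only, not kernel code): the MSR write handler stores the new event selector in the pmc before reprogramming, so reprogram_gp_counter() reads it back from pmc->eventsel instead of taking it as a parameter. The pared-down struct kvm_pmc and the set_eventsel_msr() helper below are hypothetical stand-ins for the real KVM structures and the set_msr handlers in the diff that follows.

/*
 * Sketch of the new calling convention adopted by this patch.
 * struct kvm_pmc is reduced to the one field that matters here, and
 * set_eventsel_msr() is a hypothetical stand-in for the amd/intel
 * set_msr handlers.
 */
#include <stdint.h>
#include <stdio.h>

struct kvm_pmc {
	uint64_t eventsel;	/* guest-written event selector */
};

/* After the patch: the selector is read back out of the pmc. */
static void reprogram_gp_counter(struct kvm_pmc *pmc)
{
	uint64_t eventsel = pmc->eventsel;

	printf("reprogram with eventsel=0x%llx\n",
	       (unsigned long long)eventsel);
}

/* Caller pattern mirroring the MSR write hunks in the diff below. */
static void set_eventsel_msr(struct kvm_pmc *pmc, uint64_t data)
{
	if (data != pmc->eventsel) {
		pmc->eventsel = data;	/* assignment moved into the caller */
		reprogram_gp_counter(pmc);
	}
}

int main(void)
{
	struct kvm_pmc pmc = { .eventsel = 0 };

	set_eventsel_msr(&pmc, 0x4111c0);	/* arbitrary example value */
	return 0;
}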
@@ -283,17 +283,16 @@ static bool check_pmu_event_filter(struct kvm_pmc *pmc)
 	return allow_event;
 }
 
-void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
+void reprogram_gp_counter(struct kvm_pmc *pmc)
 {
 	u64 config;
 	u32 type = PERF_TYPE_RAW;
 	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
+	u64 eventsel = pmc->eventsel;
 
 	if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
 		printk_once("kvm pmu: pin control bit is ignored\n");
 
-	pmc->eventsel = eventsel;
-
 	pmc_pause_counter(pmc);
 
 	if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_is_enabled(pmc))
@@ -358,7 +357,7 @@ EXPORT_SYMBOL_GPL(reprogram_fixed_counter);
 void reprogram_counter(struct kvm_pmc *pmc)
 {
 	if (pmc_is_gp(pmc))
-		reprogram_gp_counter(pmc, pmc->eventsel);
+		reprogram_gp_counter(pmc);
 	else {
 		int idx = pmc->idx - INTEL_PMC_IDX_FIXED;
 		u8 ctrl = fixed_ctrl_field(pmc_to_pmu(pmc)->fixed_ctr_ctrl, idx);
...
@@ -173,7 +173,7 @@ static inline void kvm_init_pmu_capability(void)
 					     KVM_PMC_MAX_FIXED);
 }
 
-void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel);
+void reprogram_gp_counter(struct kvm_pmc *pmc);
 void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int fixed_idx);
 void reprogram_counter(struct kvm_pmc *pmc);
...
@@ -286,8 +286,10 @@ static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
 	if (pmc) {
 		data &= ~pmu->reserved_bits;
-		if (data != pmc->eventsel)
-			reprogram_gp_counter(pmc, data);
+		if (data != pmc->eventsel) {
+			pmc->eventsel = data;
+			reprogram_gp_counter(pmc);
+		}
 		return 0;
 	}
...
@@ -492,7 +492,8 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		    (pmu->raw_event_mask & HSW_IN_TX_CHECKPOINTED))
 			reserved_bits ^= HSW_IN_TX_CHECKPOINTED;
 		if (!(data & reserved_bits)) {
-			reprogram_gp_counter(pmc, data);
+			pmc->eventsel = data;
+			reprogram_gp_counter(pmc);
 			return 0;
 		}
 	} else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, false))
...