Commit 66061740 authored by Marios Pomonis, committed by Paolo Bonzini

KVM: x86: Protect pmu_intel.c from Spectre-v1/L1TF attacks

This fixes Spectre-v1/L1TF vulnerabilities in intel_find_fixed_event()
and intel_rdpmc_ecx_to_pmc().
kvm_rdpmc() (ancestor of intel_rdpmc_ecx_to_pmc()) and
reprogram_fixed_counter() (ancestor of intel_find_fixed_event()) are
exported symbols, so KVM should treat them conservatively from a
security perspective.

Fixes: 25462f7f ("KVM: x86/vPMU: Define kvm_pmu_ops to support vPMU function dispatch")
Signed-off-by: Nick Finco <nifi@google.com>
Signed-off-by: Marios Pomonis <pomonis@google.com>
Reviewed-by: Andrew Honig <ahonig@google.com>
Cc: stable@vger.kernel.org
Reviewed-by: Jim Mattson <jmattson@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent ea740059
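
For readers unfamiliar with the mitigation the patch applies, below is a minimal, self-contained userspace sketch of the same "bounds check plus clamped index" pattern that array_index_nospec() provides in the kernel. Everything in it (demo_index_nospec(), demo_events[], demo_find_fixed_event()) is hypothetical illustration code, not the kernel implementation; the real macro lives in <linux/nospec.h> and uses architecture-specific, branch-free masking.

/*
 * Hedged sketch only: a userspace approximation of the array_index_nospec()
 * pattern.  Names here are invented for illustration and do not exist in KVM.
 */
#include <inttypes.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Simplified stand-in for array_index_nospec(): yields idx when idx < size
 * and 0 otherwise, via a data-dependent mask.  The real kernel macro uses
 * arch-specific code to guarantee branch-free clamping; a C compiler may or
 * may not emit a branch here, so this is only a conceptual approximation.
 */
static inline size_t demo_index_nospec(size_t idx, size_t size)
{
	size_t mask = (size_t)0 - (size_t)(idx < size); /* all ones or all zeros */

	return idx & mask;
}

static const uint32_t demo_events[] = { 10, 20, 30 };

static uint32_t demo_find_fixed_event(size_t idx)
{
	size_t size = sizeof(demo_events) / sizeof(demo_events[0]);

	if (idx >= size)
		return UINT32_MAX; /* architectural out-of-range result */

	/* Even under branch misprediction, the load stays inside demo_events[]. */
	return demo_events[demo_index_nospec(idx, size)];
}

int main(void)
{
	printf("%" PRIu32 "\n", demo_find_fixed_event(1));   /* prints 20 */
	printf("%" PRIu32 "\n", demo_find_fixed_event(100)); /* prints UINT32_MAX */
	return 0;
}

The idea is that even if the CPU mispredicts the idx >= size check and executes the array load speculatively, the clamped index keeps the access inside the array, so out-of-bounds data cannot be leaked through a cache side channel. The patch below applies exactly this structure to intel_find_fixed_event() and intel_rdpmc_ecx_to_pmc().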
@@ -86,10 +86,14 @@ static unsigned intel_find_arch_event(struct kvm_pmu *pmu,
 
 static unsigned intel_find_fixed_event(int idx)
 {
-	if (idx >= ARRAY_SIZE(fixed_pmc_events))
+	u32 event;
+	size_t size = ARRAY_SIZE(fixed_pmc_events);
+
+	if (idx >= size)
 		return PERF_COUNT_HW_MAX;
 
-	return intel_arch_events[fixed_pmc_events[idx]].event_type;
+	event = fixed_pmc_events[array_index_nospec(idx, size)];
+	return intel_arch_events[event].event_type;
 }
 
 /* check if a PMC is enabled by comparing it with globl_ctrl bits. */
@@ -130,16 +134,20 @@ static struct kvm_pmc *intel_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 	bool fixed = idx & (1u << 30);
 	struct kvm_pmc *counters;
+	unsigned int num_counters;
 
 	idx &= ~(3u << 30);
-	if (!fixed && idx >= pmu->nr_arch_gp_counters)
-		return NULL;
-	if (fixed && idx >= pmu->nr_arch_fixed_counters)
+	if (fixed) {
+		counters = pmu->fixed_counters;
+		num_counters = pmu->nr_arch_fixed_counters;
+	} else {
+		counters = pmu->gp_counters;
+		num_counters = pmu->nr_arch_gp_counters;
+	}
+	if (idx >= num_counters)
 		return NULL;
-	counters = fixed ? pmu->fixed_counters : pmu->gp_counters;
 	*mask &= pmu->counter_bitmask[fixed ? KVM_PMC_FIXED : KVM_PMC_GP];
-
-	return &counters[idx];
+	return &counters[array_index_nospec(idx, num_counters)];
 }
 
 static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)