Commit e7429f96 authored by Marios Pomonis, committed by Khalid Elmously

KVM: x86: Protect MSR-based index computations in pmu.h from Spectre-v1/L1TF attacks

BugLink: https://bugs.launchpad.net/bugs/1864775

commit 13c5183a upstream.

This fixes a Spectre-v1/L1TF vulnerability in the get_gp_pmc() and
get_fixed_pmc() functions.
They both contain index computations based on the (attacker-controlled)
MSR number.

Fixes: 25462f7f ("KVM: x86/vPMU: Define kvm_pmu_ops to support vPMU function dispatch")
Signed-off-by: Nick Finco <nifi@google.com>
Signed-off-by: Marios Pomonis <pomonis@google.com>
Reviewed-by: Andrew Honig <ahonig@google.com>
Cc: stable@vger.kernel.org
Reviewed-by: Jim Mattson <jmattson@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Khalid Elmously <khalid.elmously@canonical.com>
Signed-off-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
parent 5c70b7e5
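The gadget shape described in the commit message is, roughly, the following. This is a hypothetical, simplified user-space sketch rather than the kernel code: a bounds check on a guest-controlled MSR offset is followed by a dependent array access, and although the check is architecturally correct, the CPU may execute the access speculatively with an out-of-bounds offset before the branch resolves, leaving a cache footprint that can leak data.

/*
 * Hypothetical, simplified sketch (not kernel code) of the gadget shape
 * described above: an architecturally correct bounds check on a
 * guest-controlled MSR offset, followed by an array access that the CPU may
 * still perform speculatively with an out-of-bounds offset before the branch
 * resolves.
 */
struct counter {
	unsigned long value;
};

struct counter *lookup_counter(struct counter *counters, unsigned int nr_counters,
			       unsigned int msr, unsigned int base)
{
	if (msr >= base && msr < base + nr_counters)
		return &counters[msr - base];	/* speculative OOB read possible */

	return NULL;
}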
arch/x86/kvm/pmu.h

 #ifndef __KVM_X86_PMU_H
 #define __KVM_X86_PMU_H
 
+#include <linux/nospec.h>
+
 #define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu)
 #define pmu_to_vcpu(pmu) (container_of((pmu), struct kvm_vcpu, arch.pmu))
 #define pmc_to_pmu(pmc) (&(pmc)->vcpu->arch.pmu)
@@ -80,8 +82,12 @@ static inline bool pmc_is_enabled(struct kvm_pmc *pmc)
 static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
					  u32 base)
 {
-	if (msr >= base && msr < base + pmu->nr_arch_gp_counters)
-		return &pmu->gp_counters[msr - base];
+	if (msr >= base && msr < base + pmu->nr_arch_gp_counters) {
+		u32 index = array_index_nospec(msr - base,
+					       pmu->nr_arch_gp_counters);
+
+		return &pmu->gp_counters[index];
+	}
 
 	return NULL;
 }
@@ -91,8 +97,12 @@ static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
 {
 	int base = MSR_CORE_PERF_FIXED_CTR0;
 
-	if (msr >= base && msr < base + pmu->nr_arch_fixed_counters)
-		return &pmu->fixed_counters[msr - base];
+	if (msr >= base && msr < base + pmu->nr_arch_fixed_counters) {
+		u32 index = array_index_nospec(msr - base,
+					       pmu->nr_arch_fixed_counters);
+
+		return &pmu->fixed_counters[index];
+	}
 
 	return NULL;
 }
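For reference, the mitigation pattern used by the patch can be illustrated outside the kernel. The sketch below approximates the semantics of array_index_nospec() from <linux/nospec.h> with a portable branchless clamp; the helper names and the main() harness are illustrative only, and the real kernel macro uses arch-specific code to guarantee the mask is computed without a conditional branch.

#include <stddef.h>
#include <stdio.h>

/*
 * index_mask_nospec()/index_nospec() are illustrative stand-ins for the
 * kernel's array_index_mask_nospec()/array_index_nospec(): return the index
 * when it is below size, and 0 otherwise, so a mispredicted bounds check
 * cannot steer the dependent load out of bounds.
 */
static inline size_t index_mask_nospec(size_t index, size_t size)
{
	/* all-ones when index < size, zero otherwise */
	return (size_t)0 - (size_t)(index < size);
}

static inline size_t index_nospec(size_t index, size_t size)
{
	return index & index_mask_nospec(index, size);
}

int main(void)
{
	unsigned long counters[4] = { 10, 20, 30, 40 };
	size_t offset = 2;	/* stands in for "msr - base" from the patch */

	/* in bounds architecturally and, thanks to the mask, under speculation too */
	printf("%lu\n", counters[index_nospec(offset, 4)]);
	return 0;
}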