Commit 14620149 authored by Paolo Bonzini

Merge branch 'kvm-master' into HEAD

x86:
* Use SRCU to protect zap in __kvm_set_or_clear_apicv_inhibit()

* Make argument order consistent for kvcalloc()

* Userspace API fixes for DEBUGCTL and LBRs
parents 8e5423e9 8670866b

@@ -1338,7 +1338,7 @@ int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
 	if (sanity_check_entries(entries, cpuid->nent, type))
 		return -EINVAL;
 
-	array.entries = kvcalloc(sizeof(struct kvm_cpuid_entry2), cpuid->nent, GFP_KERNEL);
+	array.entries = kvcalloc(cpuid->nent, sizeof(struct kvm_cpuid_entry2), GFP_KERNEL);
 	if (!array.entries)
 		return -ENOMEM;
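
The change above simply swaps the first two arguments so that kvcalloc() is called as (count, element size, flags), matching the documented order and the calloc()/kcalloc() convention used everywhere else. Below is a minimal userspace sketch of the same idea, an overflow-checked "count times element size" allocation; checked_calloc() and struct entry are made-up names for illustration, not kernel code.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Hypothetical userspace analogue of an overflow-checked array
 * allocation: verify that count * size fits in size_t before
 * allocating, the same idea the kernel's *calloc() helpers rely on.
 * Not the kernel implementation.
 */
static void *checked_calloc(size_t n, size_t size)
{
	if (size != 0 && n > SIZE_MAX / size) {
		errno = ENOMEM;
		return NULL;		/* n * size would overflow */
	}
	return calloc(n, size);		/* calloc() also takes (nmemb, size) */
}

struct entry {
	uint32_t function;
	uint32_t index;
};

int main(void)
{
	size_t nent = 256;
	struct entry *entries = checked_calloc(nent, sizeof(*entries));

	if (!entries)
		return 1;

	printf("allocated %zu entries of %zu bytes each\n", nent, sizeof(*entries));
	free(entries);
	return 0;
}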

@@ -24,8 +24,6 @@ extern int __read_mostly pt_mode;
 #define PMU_CAP_FW_WRITES	(1ULL << 13)
 #define PMU_CAP_LBR_FMT		0x3f
 
-#define DEBUGCTLMSR_LBR_MASK	(DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI)
-
 struct nested_vmx_msrs {
 	/*
 	 * We only store the "true" versions of the VMX capability MSRs.  We

@@ -400,6 +398,7 @@ static inline bool vmx_pebs_supported(void)
 static inline u64 vmx_get_perf_capabilities(void)
 {
 	u64 perf_cap = PMU_CAP_FW_WRITES;
+	struct x86_pmu_lbr lbr;
 	u64 host_perf_cap = 0;
 
 	if (!enable_pmu)

@@ -408,7 +407,8 @@ static inline u64 vmx_get_perf_capabilities(void)
 	if (boot_cpu_has(X86_FEATURE_PDCM))
 		rdmsrl(MSR_IA32_PERF_CAPABILITIES, host_perf_cap);
 
-	perf_cap |= host_perf_cap & PMU_CAP_LBR_FMT;
+	if (x86_perf_get_lbr(&lbr) >= 0 && lbr.nr)
+		perf_cap |= host_perf_cap & PMU_CAP_LBR_FMT;
 
 	if (vmx_pebs_supported()) {
 		perf_cap |= host_perf_cap & PERF_CAP_PEBS_MASK;
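
With the added check, vmx_get_perf_capabilities() only advertises the host's LBR format bits when perf itself enumerates a nonzero number of LBR records, rather than trusting the PERF_CAPABILITIES MSR alone. A rough userspace sketch of that "advertise only what the host actually backs" pattern follows; query_host_lbr(), struct lbr_info and the CAP_* values are placeholders for illustration, not the kernel's definitions.

#include <stdint.h>
#include <stdio.h>

#define CAP_FW_WRITES	(1ULL << 13)
#define CAP_LBR_FMT	0x3fULL

/* Hypothetical stand-in for the host's LBR enumeration. */
struct lbr_info {
	int nr;		/* number of LBR records the host exposes */
};

static int query_host_lbr(struct lbr_info *lbr)
{
	lbr->nr = 32;	/* pretend the host exposes 32 LBR entries */
	return 0;	/* 0 == enumeration succeeded */
}

/* Advertise LBR format bits only when the host actually has LBRs. */
static uint64_t advertised_perf_caps(uint64_t host_caps)
{
	uint64_t caps = CAP_FW_WRITES;
	struct lbr_info lbr;

	if (query_host_lbr(&lbr) >= 0 && lbr.nr)
		caps |= host_caps & CAP_LBR_FMT;

	return caps;
}

int main(void)
{
	uint64_t host_caps = 0x2005;	/* example host capability value */

	printf("advertised caps: 0x%llx\n",
	       (unsigned long long)advertised_perf_caps(host_caps));
	return 0;
}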

@@ -419,19 +419,6 @@ static inline u64 vmx_get_perf_capabilities(void)
 	return perf_cap;
 }
 
-static inline u64 vmx_supported_debugctl(void)
-{
-	u64 debugctl = 0;
-
-	if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT))
-		debugctl |= DEBUGCTLMSR_BUS_LOCK_DETECT;
-
-	if (vmx_get_perf_capabilities() & PMU_CAP_LBR_FMT)
-		debugctl |= DEBUGCTLMSR_LBR_MASK;
-
-	return debugctl;
-}
-
 static inline bool cpu_has_notify_vmexit(void)
 {
 	return vmcs_config.cpu_based_2nd_exec_ctrl &

@@ -2021,15 +2021,17 @@ static u64 nested_vmx_truncate_sysenter_addr(struct kvm_vcpu *vcpu,
 	return (unsigned long)data;
 }
 
-static u64 vcpu_supported_debugctl(struct kvm_vcpu *vcpu)
+static u64 vmx_get_supported_debugctl(struct kvm_vcpu *vcpu, bool host_initiated)
 {
-	u64 debugctl = vmx_supported_debugctl();
+	u64 debugctl = 0;
 
-	if (!intel_pmu_lbr_is_enabled(vcpu))
-		debugctl &= ~DEBUGCTLMSR_LBR_MASK;
+	if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT) &&
+	    (host_initiated || guest_cpuid_has(vcpu, X86_FEATURE_BUS_LOCK_DETECT)))
+		debugctl |= DEBUGCTLMSR_BUS_LOCK_DETECT;
 
-	if (!guest_cpuid_has(vcpu, X86_FEATURE_BUS_LOCK_DETECT))
-		debugctl &= ~DEBUGCTLMSR_BUS_LOCK_DETECT;
+	if ((vmx_get_perf_capabilities() & PMU_CAP_LBR_FMT) &&
+	    (host_initiated || intel_pmu_lbr_is_enabled(vcpu)))
+		debugctl |= DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI;
 
 	return debugctl;
 }
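
The rewritten helper builds the set of writable DEBUGCTL bits from scratch: a bit is allowed only if the hardware supports the feature and either the write is host-initiated (e.g. userspace restoring MSR state during migration) or the feature has actually been exposed to the guest. A simplified, self-contained sketch of that host-initiated-versus-guest-initiated distinction is below; the struct, helpers and flag names are illustrative stand-ins, not KVM code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEBUGCTL_LBR			(1ULL << 0)
#define DEBUGCTL_BUS_LOCK_DETECT	(1ULL << 2)
#define DEBUGCTL_FREEZE_LBRS_ON_PMI	(1ULL << 11)

/* Placeholder vCPU model: which features the host and guest have. */
struct vcpu_model {
	bool host_has_bus_lock_detect;
	bool guest_has_bus_lock_detect;
	bool host_has_lbrs;
	bool guest_pmu_lbrs_enabled;
};

/*
 * Bits a write to DEBUGCTL may set.  Host-initiated writes may set any
 * bit the hardware supports; guest writes additionally require the
 * feature to be exposed to the guest.
 */
static uint64_t supported_debugctl(const struct vcpu_model *v, bool host_initiated)
{
	uint64_t debugctl = 0;

	if (v->host_has_bus_lock_detect &&
	    (host_initiated || v->guest_has_bus_lock_detect))
		debugctl |= DEBUGCTL_BUS_LOCK_DETECT;

	if (v->host_has_lbrs &&
	    (host_initiated || v->guest_pmu_lbrs_enabled))
		debugctl |= DEBUGCTL_LBR | DEBUGCTL_FREEZE_LBRS_ON_PMI;

	return debugctl;
}

int main(void)
{
	struct vcpu_model v = {
		.host_has_bus_lock_detect = true,
		.guest_has_bus_lock_detect = false,
		.host_has_lbrs = true,
		.guest_pmu_lbrs_enabled = false,
	};
	uint64_t data = DEBUGCTL_LBR | DEBUGCTL_BUS_LOCK_DETECT;

	/* Guest-initiated write: both bits are rejected. */
	printf("guest write, invalid bits: 0x%llx\n",
	       (unsigned long long)(data & ~supported_debugctl(&v, false)));
	/* Host-initiated write: the same value is accepted. */
	printf("host write,  invalid bits: 0x%llx\n",
	       (unsigned long long)(data & ~supported_debugctl(&v, true)));
	return 0;
}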

@@ -2103,7 +2105,9 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		vmcs_writel(GUEST_SYSENTER_ESP, data);
 		break;
 	case MSR_IA32_DEBUGCTLMSR: {
-		u64 invalid = data & ~vcpu_supported_debugctl(vcpu);
+		u64 invalid;
+
+		invalid = data & ~vmx_get_supported_debugctl(vcpu, msr_info->host_initiated);
 		if (invalid & (DEBUGCTLMSR_BTF|DEBUGCTLMSR_LBR)) {
 			if (report_ignored_msrs)
 				vcpu_unimpl(vcpu, "%s: BTF|LBR in IA32_DEBUGCTLMSR 0x%llx, nop\n",

@@ -10404,7 +10404,10 @@ void __kvm_set_or_clear_apicv_inhibit(struct kvm *kvm,
 		kvm->arch.apicv_inhibit_reasons = new;
 		if (new) {
 			unsigned long gfn = gpa_to_gfn(APIC_DEFAULT_PHYS_BASE);
+			int idx = srcu_read_lock(&kvm->srcu);
+
 			kvm_zap_gfn_range(kvm, gfn, gfn+1);
+			srcu_read_unlock(&kvm->srcu, idx);
 		}
 	} else {
 		kvm->arch.apicv_inhibit_reasons = new;
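
kvm_zap_gfn_range() walks the memslots, which are protected by kvm->srcu, but this path can be reached without the SRCU read lock held, so the fix brackets the zap with srcu_read_lock()/srcu_read_unlock(). The snippet below is a minimal, generic sketch of the SRCU read-side pattern in kernel code; the demo_srcu structure and data pointer are illustrative and not part of the patch.

#include <linux/printk.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/srcu.h>

/* Illustrative SRCU-protected pointer; not from the patch itself. */
DEFINE_STATIC_SRCU(demo_srcu);
static int __rcu *demo_data;

static void demo_reader(void)
{
	int idx;
	int *p;

	/* srcu_read_lock() returns a token that must be passed to unlock. */
	idx = srcu_read_lock(&demo_srcu);
	p = srcu_dereference(demo_data, &demo_srcu);
	if (p)
		pr_info("demo value: %d\n", *p);
	srcu_read_unlock(&demo_srcu, idx);
}

static void demo_writer(int *new_val)
{
	int *old = rcu_dereference_protected(demo_data, 1);

	rcu_assign_pointer(demo_data, new_val);
	/* Wait for readers that may still hold the old pointer. */
	synchronize_srcu(&demo_srcu);
	kfree(old);
}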